diff --git a/base/daemon/BaseDaemon.cpp b/base/daemon/BaseDaemon.cpp index f25bcdb91e1..39506186732 100644 --- a/base/daemon/BaseDaemon.cpp +++ b/base/daemon/BaseDaemon.cpp @@ -962,7 +962,7 @@ void BaseDaemon::setupWatchdog() if (WIFEXITED(status)) { logger().information(fmt::format("Child process exited normally with code {}.", WEXITSTATUS(status))); - _exit(status); + _exit(WEXITSTATUS(status)); } if (WIFSIGNALED(status)) @@ -980,7 +980,7 @@ void BaseDaemon::setupWatchdog() logger().fatal(fmt::format("Child process was terminated by signal {}.", sig)); if (sig == SIGINT || sig == SIGTERM || sig == SIGQUIT) - _exit(status); + _exit(128 + sig); } } else diff --git a/docker/test/coverage/run.sh b/docker/test/coverage/run.sh index e2369a28a9a..11b6ce13ea1 100755 --- a/docker/test/coverage/run.sh +++ b/docker/test/coverage/run.sh @@ -102,11 +102,11 @@ else echo "No failed tests" fi -mkdir -p $COVERAGE_DIR -mv /*.profraw $COVERAGE_DIR +mkdir -p "$COVERAGE_DIR" +mv /*.profraw "$COVERAGE_DIR" -mkdir -p $SOURCE_DIR/obj-x86_64-linux-gnu -cd $SOURCE_DIR/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. && cd / -llvm-profdata-11 merge -sparse ${COVERAGE_DIR}/* -o clickhouse.profdata -llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex $IGNORE > output.lcov -genhtml output.lcov --ignore-errors source --output-directory ${OUTPUT_DIR} +mkdir -p "$SOURCE_DIR"/obj-x86_64-linux-gnu +cd "$SOURCE_DIR"/obj-x86_64-linux-gnu && CC=clang-11 CXX=clang++-11 cmake .. 
&& cd / +llvm-profdata-11 merge -sparse "${COVERAGE_DIR}"/* -o clickhouse.profdata +llvm-cov-11 export /usr/bin/clickhouse -instr-profile=clickhouse.profdata -j=16 -format=lcov -skip-functions -ignore-filename-regex "$IGNORE" > output.lcov +genhtml output.lcov --ignore-errors source --output-directory "${OUTPUT_DIR}" diff --git a/docker/test/fasttest/run.sh b/docker/test/fasttest/run.sh index a918cc44420..c782ac49d27 100755 --- a/docker/test/fasttest/run.sh +++ b/docker/test/fasttest/run.sh @@ -65,7 +65,7 @@ function start_server { set -m # Spawn server in its own process groups local opts=( - --config-file="$FASTTEST_DATA/config.xml" + --config-file "$FASTTEST_DATA/config.xml" -- --path "$FASTTEST_DATA" --user_files_path "$FASTTEST_DATA/user_files" diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index e6e987e1d94..309328bc8e2 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -55,9 +55,9 @@ function run_tests() ADDITIONAL_OPTIONS+=('00000_no_tests_to_skip') fi - for i in $(seq 1 $NUM_TRIES); do + for _ in $(seq 1 "$NUM_TRIES"); do clickhouse-test --testname --shard --zookeeper --hung-check --print-time "$SKIP_LIST_OPT" "${ADDITIONAL_OPTIONS[@]}" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt - if [ ${PIPESTATUS[0]} -ne "0" ]; then + if [ "${PIPESTATUS[0]}" -ne "0" ]; then break; fi done @@ -65,4 +65,4 @@ function run_tests() export -f run_tests -timeout $MAX_RUN_TIME bash -c run_tests ||: +timeout "$MAX_RUN_TIME" bash -c run_tests ||: diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile index 239a074969c..f7555231ffb 100644 --- a/docker/test/style/Dockerfile +++ b/docker/test/style/Dockerfile @@ -8,4 +8,5 @@ CMD cd /ClickHouse/utils/check-style && \ ./check-style -n | tee /test_output/style_output.txt && \ ./check-typos | tee /test_output/typos_output.txt && \ ./check-whitespaces -n | tee /test_output/whitespaces_output.txt && \ - ./check-duplicate-includes.sh | tee 
/test_output/duplicate_output.txt + ./check-duplicate-includes.sh | tee /test_output/duplicate_output.txt && \ + ./shellcheck-run.sh | tee /test_output/shellcheck_output.txt diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 71788e9eff4..e5f836e3b5a 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -726,7 +726,7 @@ log_queries=1 ## log_queries_min_query_duration_ms {#settings-log-queries-min-query-duration-ms} -Minimal time for the query to run to get to the following tables: +If enabled (non-zero), queries faster than the value of this setting will not be logged (you can think about this as a `long_query_time` for [MySQL Slow Query Log](https://dev.mysql.com/doc/refman/5.7/en/slow-query-log.html)), and this basically means that you will not find them in the following tables: - `system.query_log` - `system.query_thread_log` @@ -2470,6 +2470,31 @@ Possible values: Default value: `0`. +## union_default_mode {#union-default-mode} + +Sets a mode for combining `SELECT` query results. The setting is only used when shared with [UNION](../../sql-reference/statements/select/union.md) without explicitly specifying `UNION ALL` or `UNION DISTINCT`. + +Possible values: + +- `'DISTINCT'` — ClickHouse outputs rows as a result of combining queries removing duplicate rows. +- `'ALL'` — ClickHouse outputs all rows as a result of combining queries including duplicate rows. +- `''` — ClickHouse generates an exception when used with `UNION`. + +Default value: `''`. + +See examples in [UNION](../../sql-reference/statements/select/union.md). + +## data_type_default_nullable {#data_type_default_nullable} + +Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) in column definition to be [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable). 
+ +Possible values: + +- 1 — The data types in column definitions are set to `Nullable` by default. +- 0 — The data types in column definitions are not set to `Nullable` by default. + +Default value: `0`. + ## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold} Enables special logic to perform merges on replicas. diff --git a/docs/en/operations/system-tables/errors.md b/docs/en/operations/system-tables/errors.md index 53e8a397217..ec874efd711 100644 --- a/docs/en/operations/system-tables/errors.md +++ b/docs/en/operations/system-tables/errors.md @@ -1,12 +1,12 @@ # system.errors {#system_tables-errors} -Contains error codes with number of times they have been triggered. +Contains error codes with the number of times they have been triggered. Columns: - `name` ([String](../../sql-reference/data-types/string.md)) — name of the error (`errorCodeToName`). - `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — code number of the error. -- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) - number of times this error has been happened. +- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — the number of times this error has happened. **Example** diff --git a/docs/en/sql-reference/functions/hash-functions.md b/docs/en/sql-reference/functions/hash-functions.md index 3594737c18a..9394426b20b 100644 --- a/docs/en/sql-reference/functions/hash-functions.md +++ b/docs/en/sql-reference/functions/hash-functions.md @@ -157,14 +157,14 @@ Levels are the same as in URLHierarchy. This function is specific to Yandex.Metr ## farmHash64 {#farmhash64} -Produces a 64-bit [FarmHash](https://github.com/google/farmhash) or Fingerprint value. Prefer `farmFingerprint64` for a stable and portable value. +Produces a 64-bit [FarmHash](https://github.com/google/farmhash) or Fingerprint value. `farmFingerprint64` is preferred for a stable and portable value. ``` sql farmFingerprint64(par1, ...) 
farmHash64(par1, ...) ``` -These functions use the `Fingerprint64` and `Hash64` method respectively from all [available methods](https://github.com/google/farmhash/blob/master/src/farmhash.h). +These functions use the `Fingerprint64` and `Hash64` methods respectively from all [available methods](https://github.com/google/farmhash/blob/master/src/farmhash.h). **Parameters** diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md index a846a01f11f..83f2705693a 100644 --- a/docs/en/sql-reference/functions/string-functions.md +++ b/docs/en/sql-reference/functions/string-functions.md @@ -558,4 +558,46 @@ Result: └─────┘ ``` +## encodeXMLComponent {#encode-xml-component} + +Escapes characters to place string into XML text node or attribute. + +The following five XML predefined entities will be replaced: `<`, `&`, `>`, `"`, `'`. + +**Syntax** + +``` sql +encodeXMLComponent(x) +``` + +**Parameters** + +- `x` — The sequence of characters. [String](../../sql-reference/data-types/string.md). + +**Returned value(s)** + +- The sequence of characters with escape characters. + +Type: [String](../../sql-reference/data-types/string.md). + +**Example** + +Query: + +``` sql +SELECT encodeXMLComponent('Hello, "world"!'); +SELECT encodeXMLComponent('<123>'); +SELECT encodeXMLComponent('&clickhouse'); +SELECT encodeXMLComponent('\'foo\''); +``` + +Result: + +``` text +Hello, "world"! 
+<123> +&clickhouse +'foo' +``` + [Original article](https://clickhouse.tech/docs/en/query_language/functions/string_functions/) diff --git a/docs/en/sql-reference/functions/string-search-functions.md b/docs/en/sql-reference/functions/string-search-functions.md index 82ef5610868..4036974dd37 100644 --- a/docs/en/sql-reference/functions/string-search-functions.md +++ b/docs/en/sql-reference/functions/string-search-functions.md @@ -400,7 +400,8 @@ Result: └──────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**See also** +**See Also** + - [extractAllGroupsVertical](#extractallgroups-vertical) ## extractAllGroupsVertical {#extractallgroups-vertical} @@ -440,7 +441,8 @@ Result: └────────────────────────────────────────────────────────────────────────────────────────┘ ``` -**See also** +**See Also** + - [extractAllGroupsHorizontal](#extractallgroups-horizontal) ## like(haystack, pattern), haystack LIKE pattern operator {#function-like} @@ -726,4 +728,51 @@ Result: Returns the number of regular expression matches for a `pattern` in a `haystack`. +**Syntax** + +``` sql +countMatches(haystack, pattern) +``` + +**Parameters** + +- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `pattern` — The regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). [String](../../sql-reference/data-types/string.md). + +**Returned value** + +- The number of matches. + +Type: [UInt64](../../sql-reference/data-types/int-uint.md). 
+ +**Examples** + +Query: + +``` sql +SELECT countMatches('foobar.com', 'o+'); +``` + +Result: + +``` text +┌─countMatches('foobar.com', 'o+')─┐ +│ 2 │ +└──────────────────────────────────┘ +``` + +Query: + +``` sql +SELECT countMatches('aaaa', 'aa'); +``` + +Result: + +``` text +┌─countMatches('aaaa', 'aa')────┐ +│ 2 │ +└───────────────────────────────┘ +``` + [Original article](https://clickhouse.tech/docs/en/query_language/functions/string_search_functions/) diff --git a/docs/en/sql-reference/statements/create/table.md b/docs/en/sql-reference/statements/create/table.md index e9952fc76fd..b1a5fdb19b5 100644 --- a/docs/en/sql-reference/statements/create/table.md +++ b/docs/en/sql-reference/statements/create/table.md @@ -16,8 +16,8 @@ By default, tables are created only on the current server. Distributed DDL queri ``` sql CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster] ( - name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], - name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2], + name1 [type1] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr1] [compression_codec] [TTL expr1], + name2 [type2] [NULL|NOT NULL] [DEFAULT|MATERIALIZED|ALIAS expr2] [compression_codec] [TTL expr2], ... ) ENGINE = engine ``` @@ -57,6 +57,14 @@ In all cases, if `IF NOT EXISTS` is specified, the query won’t return an error There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines). +## NULL Or NOT NULL Modifiers {#null-modifiers} + +`NULL` and `NOT NULL` modifiers after data type in column definition allow or do not allow it to be [Nullable](../../../sql-reference/data-types/nullable.md#data_type-nullable). + +If the type is not `Nullable` and if `NULL` is specified, it will be treated as `Nullable`; if `NOT NULL` is specified, then no. 
For example, `INT NULL` is the same as `Nullable(INT)`. If the type is `Nullable` and `NULL` or `NOT NULL` modifiers are specified, the exception will be thrown. + +See also [data_type_default_nullable](../../../operations/settings/settings.md#data_type_default_nullable) setting. + ## Default Values {#create-default-values} The column description can specify an expression for a default value, in one of the following ways: `DEFAULT expr`, `MATERIALIZED expr`, `ALIAS expr`. diff --git a/docs/en/sql-reference/statements/select/index.md b/docs/en/sql-reference/statements/select/index.md index 60c769c4660..ed69198ed4d 100644 --- a/docs/en/sql-reference/statements/select/index.md +++ b/docs/en/sql-reference/statements/select/index.md @@ -46,7 +46,7 @@ Specifics of each optional clause are covered in separate sections, which are li - [SELECT clause](#select-clause) - [DISTINCT clause](../../../sql-reference/statements/select/distinct.md) - [LIMIT clause](../../../sql-reference/statements/select/limit.md) -- [UNION clause](../../../sql-reference/statements/select/union-all.md) +- [UNION clause](../../../sql-reference/statements/select/union.md) - [INTO OUTFILE clause](../../../sql-reference/statements/select/into-outfile.md) - [FORMAT clause](../../../sql-reference/statements/select/format.md) diff --git a/docs/en/sql-reference/statements/select/union-all.md b/docs/en/sql-reference/statements/select/union-all.md deleted file mode 100644 index f150efbdc80..00000000000 --- a/docs/en/sql-reference/statements/select/union-all.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -toc_title: UNION ---- - -# UNION ALL Clause {#union-all-clause} - -You can use `UNION ALL` to combine any number of `SELECT` queries by extending their results. 
Example: - -``` sql -SELECT CounterID, 1 AS table, toInt64(count()) AS c - FROM test.hits - GROUP BY CounterID - -UNION ALL - -SELECT CounterID, 2 AS table, sum(Sign) AS c - FROM test.visits - GROUP BY CounterID - HAVING c > 0 -``` - -Result columns are matched by their index (order inside `SELECT`). If column names do not match, names for the final result are taken from the first query. - -Type casting is performed for unions. For example, if two queries being combined have the same field with non-`Nullable` and `Nullable` types from a compatible type, the resulting `UNION ALL` has a `Nullable` type field. - -Queries that are parts of `UNION ALL` can’t be enclosed in round brackets. [ORDER BY](../../../sql-reference/statements/select/order-by.md) and [LIMIT](../../../sql-reference/statements/select/limit.md) are applied to separate queries, not to the final result. If you need to apply a conversion to the final result, you can put all the queries with `UNION ALL` in a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. - -# UNION DISTINCT Clause {#union-distinct-clause} -The difference between `UNION ALL` and `UNION DISTINCT` is that `UNION DISTINCT` will do a distinct transform for union result, it is equivalent to `SELECT DISTINCT` from a subquery containing `UNION ALL`. - -# UNION Clause {#union-clause} -By default, `UNION` has the same behavior as `UNION DISTINCT`, but you can specify union mode by setting `union_default_mode`, values can be 'ALL', 'DISTINCT' or empty string. However, if you use `UNION` with setting `union_default_mode` to empty string, it will throw an exception. - - -## Implementation Details {#implementation-details} - -Queries that are parts of `UNION/UNION ALL/UNION DISTINCT` can be run simultaneously, and their results can be mixed together. 
diff --git a/docs/en/sql-reference/statements/select/union.md b/docs/en/sql-reference/statements/select/union.md new file mode 100644 index 00000000000..cf18ff7a4a2 --- /dev/null +++ b/docs/en/sql-reference/statements/select/union.md @@ -0,0 +1,81 @@ +--- +toc_title: UNION +--- + +# UNION Clause {#union-clause} + +You can use `UNION` with explicitly specifying `UNION ALL` or `UNION DISTINCT`. + +If you don't specify `ALL` or `DISTINCT`, it will depend on the `union_default_mode` setting. The difference between `UNION ALL` and `UNION DISTINCT` is that `UNION DISTINCT` will do a distinct transform for union result, it is equivalent to `SELECT DISTINCT` from a subquery containing `UNION ALL`. + +You can use `UNION` to combine any number of `SELECT` queries by extending their results. Example: + +``` sql +SELECT CounterID, 1 AS table, toInt64(count()) AS c + FROM test.hits + GROUP BY CounterID + +UNION ALL + +SELECT CounterID, 2 AS table, sum(Sign) AS c + FROM test.visits + GROUP BY CounterID + HAVING c > 0 +``` + +Result columns are matched by their index (order inside `SELECT`). If column names do not match, names for the final result are taken from the first query. + +Type casting is performed for unions. For example, if two queries being combined have the same field with non-`Nullable` and `Nullable` types from a compatible type, the resulting `UNION` has a `Nullable` type field. + +Queries that are parts of `UNION` can be enclosed in round brackets. [ORDER BY](../../../sql-reference/statements/select/order-by.md) and [LIMIT](../../../sql-reference/statements/select/limit.md) are applied to separate queries, not to the final result. If you need to apply a conversion to the final result, you can put all the queries with `UNION` in a subquery in the [FROM](../../../sql-reference/statements/select/from.md) clause. 
+ +If you use `UNION` without explicitly specifying `UNION ALL` or `UNION DISTINCT`, you can specify the union mode using the [union_default_mode](../../../operations/settings/settings.md#union-default-mode) setting. The setting values can be `ALL`, `DISTINCT` or an empty string. However, if you use `UNION` with the `union_default_mode` setting set to an empty string, it will throw an exception. The following examples demonstrate the results of queries with different values of this setting. + +Query: + +```sql +SET union_default_mode = 'DISTINCT'; +SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2; +``` + +Result: + +```text +┌─1─┐ +│ 1 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 3 │ +└───┘ +``` + +Query: + +```sql +SET union_default_mode = 'ALL'; +SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2; +``` + +Result: + +```text +┌─1─┐ +│ 1 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 3 │ +└───┘ +``` + +Queries that are parts of `UNION/UNION ALL/UNION DISTINCT` can be run simultaneously, and their results can be mixed together. 
+ +[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/union/) diff --git a/docs/es/sql-reference/statements/select/index.md b/docs/es/sql-reference/statements/select/index.md index a5ff9820a2b..653f737b1d0 100644 --- a/docs/es/sql-reference/statements/select/index.md +++ b/docs/es/sql-reference/statements/select/index.md @@ -44,7 +44,7 @@ Los detalles de cada cláusula opcional se cubren en secciones separadas, que se - [Cláusula HAVING](having.md) - [Cláusula SELECT](#select-clause) - [Cláusula LIMIT](limit.md) -- [UNION ALL cláusula](union-all.md) +- [UNION ALL cláusula](union.md) ## SELECT Cláusula {#select-clause} diff --git a/docs/es/sql-reference/statements/select/union-all.md b/docs/es/sql-reference/statements/select/union.md similarity index 97% rename from docs/es/sql-reference/statements/select/union-all.md rename to docs/es/sql-reference/statements/select/union.md index b2b45ba770e..d3aec34ba4b 100644 --- a/docs/es/sql-reference/statements/select/union-all.md +++ b/docs/es/sql-reference/statements/select/union.md @@ -3,7 +3,7 @@ machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd --- -# UNION ALL Cláusula {#union-all-clause} +# UNION Cláusula {#union-clause} Usted puede utilizar `UNION ALL` combinar cualquier número de `SELECT` consultas extendiendo sus resultados. 
Ejemplo: diff --git a/docs/fa/sql-reference/statements/select/index.md b/docs/fa/sql-reference/statements/select/index.md index 2ab3fea2ff1..90541b80636 100644 --- a/docs/fa/sql-reference/statements/select/index.md +++ b/docs/fa/sql-reference/statements/select/index.md @@ -44,7 +44,7 @@ SELECT [DISTINCT] expr_list - [داشتن بند](having.md) - [انتخاب بند](#select-clause) - [بند محدود](limit.md) -- [اتحادیه همه بند](union-all.md) +- [اتحادیه همه بند](union.md) ## انتخاب بند {#select-clause} diff --git a/docs/fa/sql-reference/statements/select/union-all.md b/docs/fa/sql-reference/statements/select/union.md similarity index 97% rename from docs/fa/sql-reference/statements/select/union-all.md rename to docs/fa/sql-reference/statements/select/union.md index 3c4fe5c1546..03d723e2338 100644 --- a/docs/fa/sql-reference/statements/select/union-all.md +++ b/docs/fa/sql-reference/statements/select/union.md @@ -3,7 +3,7 @@ machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd --- -# اتحادیه همه بند {#union-all-clause} +# اتحادیه همه بند {#union-clause} شما می توانید استفاده کنید `UNION ALL` برای ترکیب هر تعداد از `SELECT` نمایش داده شد با گسترش نتایج خود را. 
مثال: diff --git a/docs/fr/sql-reference/statements/select/index.md b/docs/fr/sql-reference/statements/select/index.md index 5073469e651..1d53ae80eb4 100644 --- a/docs/fr/sql-reference/statements/select/index.md +++ b/docs/fr/sql-reference/statements/select/index.md @@ -44,7 +44,7 @@ Spécificités de chaque clause facultative, sont couverts dans des sections dis - [Clause HAVING](having.md) - [Clause SELECT](#select-clause) - [Clause LIMIT](limit.md) -- [Clause UNION ALL](union-all.md) +- [Clause UNION ALL](union.md) ## Clause SELECT {#select-clause} diff --git a/docs/fr/sql-reference/statements/select/union-all.md b/docs/fr/sql-reference/statements/select/union.md similarity index 97% rename from docs/fr/sql-reference/statements/select/union-all.md rename to docs/fr/sql-reference/statements/select/union.md index 63e9987965f..9ae65ebcf72 100644 --- a/docs/fr/sql-reference/statements/select/union-all.md +++ b/docs/fr/sql-reference/statements/select/union.md @@ -3,7 +3,7 @@ machine_translated: true machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd --- -# Clause UNION ALL {#union-all-clause} +# Clause UNION ALL {#union-clause} Vous pouvez utiliser `UNION ALL` à combiner `SELECT` requêtes en étendant leurs résultats. 
Exemple: diff --git a/docs/ja/sql-reference/statements/select/union-all.md b/docs/ja/sql-reference/statements/select/union-all.md deleted file mode 120000 index 837caae2698..00000000000 --- a/docs/ja/sql-reference/statements/select/union-all.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/statements/select/union-all.md \ No newline at end of file diff --git a/docs/ja/sql-reference/statements/select/union.md b/docs/ja/sql-reference/statements/select/union.md new file mode 100644 index 00000000000..0eb8db0be7a --- /dev/null +++ b/docs/ja/sql-reference/statements/select/union.md @@ -0,0 +1 @@ +../../../../en/sql-reference/statements/select/union.md \ No newline at end of file diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 0992e6ce82d..c4f5cdaf2ca 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -408,11 +408,11 @@ INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2; - `'best_effort'` — включает расширенный парсинг. - ClickHouse может парсить базовый формат `YYYY-MM-DD HH:MM:SS` и все форматы [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601). Например, `'2018-06-08T01:02:03.000Z'`. +ClickHouse может парсить базовый формат `YYYY-MM-DD HH:MM:SS` и все форматы [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601). Например, `'2018-06-08T01:02:03.000Z'`. - `'basic'` — используется базовый парсер. - ClickHouse может парсить только базовый формат `YYYY-MM-DD HH:MM:SS`. Например, `'2019-08-20 10:18:56'`. +ClickHouse может парсить только базовый формат `YYYY-MM-DD HH:MM:SS` или `YYYY-MM-DD`. Например, `'2019-08-20 10:18:56'` или `2019-08-20`. Значение по умолчанию: `'basic'`. 
@@ -691,6 +691,21 @@ ClickHouse использует этот параметр при чтении д log_queries=1 ``` +## log_queries_min_query_duration_ms {#settings-log-queries-min-query-duration-ms} + +Минимальное время выполнения запроса для логирования в системные таблицы: + +- `system.query_log` +- `system.query_thread_log` + +В случае ненулевого порога `log_queries_min_query_duration_ms`, в лог будут записываться лишь события об окончании выполнения запроса: + +- `QUERY_FINISH` +- `EXCEPTION_WHILE_PROCESSING` + +- Тип: milliseconds +- Значение по умолчанию: 0 (логировать все запросы) + ## log_queries_min_type {#settings-log-queries-min-type} Задаёт минимальный уровень логирования в `query_log`. @@ -2324,6 +2339,20 @@ SELECT number FROM numbers(3) FORMAT JSONEachRow; Значение по умолчанию: `0`. +## union_default_mode {#union-default-mode} + +Устанавливает режим объединения результатов `SELECT` запросов. Настройка используется только при совместном использовании с [UNION](../../sql-reference/statements/select/union.md) без явного указания `UNION ALL` или `UNION DISTINCT`. + +Возможные значения: + +- `'DISTINCT'` — ClickHouse выводит строки в результате объединения результатов запросов, удаляя повторяющиеся строки. +- `'ALL'` — ClickHouse выводит все строки в результате объединения результатов запросов, включая повторяющиеся строки. +- `''` — ClickHouse генерирует исключение при использовании с `UNION`. + +Значение по умолчанию: `''`. + +Смотрите примеры в разделе [UNION](../../sql-reference/statements/select/union.md). + ## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold} Включает особую логику выполнения слияний на репликах. 
diff --git a/docs/ru/operations/system-tables/errors.md b/docs/ru/operations/system-tables/errors.md new file mode 100644 index 00000000000..3a824c8c834 --- /dev/null +++ b/docs/ru/operations/system-tables/errors.md @@ -0,0 +1,23 @@ +# system.errors {#system_tables-errors} + +Содержит коды ошибок с указанием количества срабатываний. + +Столбцы: + +- `name` ([String](../../sql-reference/data-types/string.md)) — название ошибки (`errorCodeToName`). +- `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — номер кода ошибки. +- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — количество ошибок. + +**Пример** + +``` sql +SELECT * +FROM system.errors +WHERE value > 0 +ORDER BY code ASC +LIMIT 1 + +┌─name─────────────┬─code─┬─value─┐ +│ CANNOT_OPEN_FILE │ 76 │ 1 │ +└──────────────────┴──────┴───────┘ +``` diff --git a/docs/ru/sql-reference/data-types/date.md b/docs/ru/sql-reference/data-types/date.md index 9bcae2c1d72..490bc5c28b4 100644 --- a/docs/ru/sql-reference/data-types/date.md +++ b/docs/ru/sql-reference/data-types/date.md @@ -9,4 +9,39 @@ toc_title: Date Дата хранится без учёта часового пояса. 
+## Примеры {#examples} + +**1.** Создание таблицы и добавление в неё данных: + +``` sql +CREATE TABLE dt +( + `timestamp` Date, + `event_id` UInt8 +) +ENGINE = TinyLog; +``` + +``` sql +INSERT INTO dt Values (1546300800, 1), ('2019-01-01', 2); +``` + +``` sql +SELECT * FROM dt; +``` + +``` text +┌──timestamp─┬─event_id─┐ +│ 2019-01-01 │ 1 │ +│ 2019-01-01 │ 2 │ +└────────────┴──────────┘ +``` + +## Смотрите также {#see-also} + +- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) +- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) +- [Тип данных `DateTime`](../../sql-reference/data-types/datetime.md) + + [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/date/) diff --git a/docs/ru/sql-reference/data-types/datetime.md b/docs/ru/sql-reference/data-types/datetime.md index 87c5da68f35..9894fa2802b 100644 --- a/docs/ru/sql-reference/data-types/datetime.md +++ b/docs/ru/sql-reference/data-types/datetime.md @@ -116,12 +116,14 @@ FROM dt ## See Also {#see-also} -- [Функции преобразования типов](../../sql-reference/data-types/datetime.md) -- [Функции для работы с датой и временем](../../sql-reference/data-types/datetime.md) -- [Функции для работы с массивами](../../sql-reference/data-types/datetime.md) -- [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format) -- [Конфигурационный параметр сервера `timezone`](../../sql-reference/data-types/datetime.md#server_configuration_parameters-timezone) -- [Операторы для работы с датой и временем](../../sql-reference/data-types/datetime.md#operators-datetime) +- [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) +- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) +- [Функции для работы с массивами](../../sql-reference/functions/array-functions.md) +- [Настройка 
`date_time_input_format`](../../operations/settings/settings/#settings-date_time_input_format) +- [Настройка `date_time_output_format`](../../operations/settings/settings/) +- [Конфигурационный параметр сервера `timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) +- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) - [Тип данных `Date`](date.md) +- [Тип данных `DateTime64`](datetime64.md) [Оригинальная статья](https://clickhouse.tech/docs/ru/data_types/datetime/) diff --git a/docs/ru/sql-reference/data-types/datetime64.md b/docs/ru/sql-reference/data-types/datetime64.md index 0a602e44636..6576bf9dc0d 100644 --- a/docs/ru/sql-reference/data-types/datetime64.md +++ b/docs/ru/sql-reference/data-types/datetime64.md @@ -92,11 +92,12 @@ FROM dt ## See Also {#see-also} -- [Функции преобразования типов](../../sql-reference/data-types/datetime64.md) -- [Функции для работы с датой и временем](../../sql-reference/data-types/datetime64.md) -- [Функции для работы с массивами](../../sql-reference/data-types/datetime64.md) +- [Функции преобразования типов](../../sql-reference/functions/type-conversion-functions.md) +- [Функции для работы с датой и временем](../../sql-reference/functions/date-time-functions.md) +- [Функции для работы с массивами](../../sql-reference/functions/array-functions.md) - [Настройка `date_time_input_format`](../../operations/settings/settings.md#settings-date_time_input_format) -- [Конфигурационный параметр сервера `timezone`](../../sql-reference/data-types/datetime64.md#server_configuration_parameters-timezone) -- [Операторы для работы с датой и временем](../../sql-reference/data-types/datetime64.md#operators-datetime) +- [Настройка `date_time_output_format`](../../operations/settings/settings.md) +- [Конфигурационный параметр сервера 
`timezone`](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) +- [Операторы для работы с датой и временем](../../sql-reference/operators/index.md#operators-datetime) - [Тип данных `Date`](date.md) - [Тип данных `DateTime`](datetime.md) diff --git a/docs/ru/sql-reference/functions/hash-functions.md b/docs/ru/sql-reference/functions/hash-functions.md index 92fc69227f4..f7820889ea9 100644 --- a/docs/ru/sql-reference/functions/hash-functions.md +++ b/docs/ru/sql-reference/functions/hash-functions.md @@ -153,15 +153,18 @@ SELECT groupBitXor(cityHash64(*)) FROM table `URLHash(s, N)` - вычислить хэш от строки до N-го уровня в иерархии URL, без одного завершающего символа `/`, `?` или `#` на конце, если там такой есть. Уровни аналогичные URLHierarchy. Функция специфична для Яндекс.Метрики. +## farmFingerprint64 {#farmfingerprint64} + ## farmHash64 {#farmhash64} -Генерирует 64-х битное значение [FarmHash](https://github.com/google/farmhash). +Создает 64-битное значение [FarmHash](https://github.com/google/farmhash), независимое от платформы (архитектуры сервера), что важно, если значения сохраняются или используются для разбиения данных на группы. ``` sql +farmFingerprint64(par1, ...) farmHash64(par1, ...) ``` -Из всех [доступных методов](https://github.com/google/farmhash/blob/master/src/farmhash.h) функция использует `Hash64`. +Эти функции используют методы `Fingerprint64` и `Hash64` из всех [доступных методов](https://github.com/google/farmhash/blob/master/src/farmhash.h). 
**Параметры** diff --git a/docs/ru/sql-reference/functions/other-functions.md b/docs/ru/sql-reference/functions/other-functions.md index e2dd667fc04..68afb3e24ce 100644 --- a/docs/ru/sql-reference/functions/other-functions.md +++ b/docs/ru/sql-reference/functions/other-functions.md @@ -1686,6 +1686,26 @@ SELECT countDigits(toDecimal32(1, 9)), countDigits(toDecimal32(-1, 9)), 10 10 19 19 39 39 ``` +## errorCodeToName {#error-code-to-name} + +**Возвращаемое значение** + +- Название переменной для кода ошибки. + +Тип: [LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md). + +**Синтаксис** + +``` sql +errorCodeToName(1) +``` + +Результат: + +``` text +UNSUPPORTED_METHOD +``` + ## tcpPort {#tcpPort} Вовращает номер TCP порта, который использует сервер для [нативного протокола](../../interfaces/tcp.md). diff --git a/docs/ru/sql-reference/functions/string-search-functions.md b/docs/ru/sql-reference/functions/string-search-functions.md index e4d9316cbf3..e8cbb8deec4 100644 --- a/docs/ru/sql-reference/functions/string-search-functions.md +++ b/docs/ru/sql-reference/functions/string-search-functions.md @@ -521,6 +521,57 @@ SELECT * FROM Months WHERE ilike(name, '%j%') !!! note "Примечание" Для случая UTF-8 мы используем триграммное расстояние. Вычисление n-граммного расстояния не совсем честное. Мы используем 2-х байтные хэши для хэширования n-грамм, а затем вычисляем (не)симметрическую разность между хэш таблицами – могут возникнуть коллизии. В формате UTF-8 без учета регистра мы не используем честную функцию `tolower` – мы обнуляем 5-й бит (нумерация с нуля) каждого байта кодовой точки, а также первый бит нулевого байта, если байтов больше 1 – это работает для латиницы и почти для всех кириллических букв. + +## countMatches(haystack, pattern) {#countmatcheshaystack-pattern} + +Возвращает количество совпадений, найденных в строке `haystack`, для регулярного выражения `pattern`. 
+ +**Синтаксис** + +``` sql +countMatches(haystack, pattern) +``` + +**Параметры** + +- `haystack` — строка, по которой выполняется поиск. [String](../../sql-reference/syntax.md#syntax-string-literal). +- `pattern` — регулярное выражение, построенное по синтаксическим правилам [re2](https://github.com/google/re2/wiki/Syntax). [String](../../sql-reference/data-types/string.md). + +**Возвращаемое значение** + +- Количество совпадений. + +Тип: [UInt64](../../sql-reference/data-types/int-uint.md). + +**Примеры** + +Запрос: + +``` sql +SELECT countMatches('foobar.com', 'o+'); +``` + +Результат: + +``` text +┌─countMatches('foobar.com', 'o+')─┐ +│ 2 │ +└──────────────────────────────────┘ +``` + +Запрос: + +``` sql +SELECT countMatches('aaaa', 'aa'); +``` + +Результат: + +``` text +┌─countMatches('aaaa', 'aa')────┐ +│ 2 │ +└───────────────────────────────┘ +``` ## countSubstrings {#countSubstrings} diff --git a/docs/ru/sql-reference/statements/select/index.md b/docs/ru/sql-reference/statements/select/index.md index c2e05f05079..bf4ae44a6f1 100644 --- a/docs/ru/sql-reference/statements/select/index.md +++ b/docs/ru/sql-reference/statements/select/index.md @@ -44,7 +44,7 @@ SELECT [DISTINCT] expr_list - [Секция SELECT](#select-clause) - [Секция DISTINCT](distinct.md) - [Секция LIMIT](limit.md) -- [Секция UNION ALL](union-all.md) +- [Секция UNION ALL](union.md) - [Секция INTO OUTFILE](into-outfile.md) - [Секция FORMAT](format.md) diff --git a/docs/ru/sql-reference/statements/select/union-all.md b/docs/ru/sql-reference/statements/select/union-all.md deleted file mode 100644 index b9d1f485a7b..00000000000 --- a/docs/ru/sql-reference/statements/select/union-all.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -toc_title: UNION ALL ---- - -# Секция UNION ALL {#union-all-clause} - -Вы можете использовать `UNION ALL` чтобы объединить любое количество `SELECT` запросы путем расширения их результатов. 
Пример: - -``` sql -SELECT CounterID, 1 AS table, toInt64(count()) AS c - FROM test.hits - GROUP BY CounterID - -UNION ALL - -SELECT CounterID, 2 AS table, sum(Sign) AS c - FROM test.visits - GROUP BY CounterID - HAVING c > 0 -``` - -Результирующие столбцы сопоставляются по их индексу (порядку внутри `SELECT`). Если имена столбцов не совпадают, то имена для конечного результата берутся из первого запроса. - -При объединении выполняет приведение типов. Например, если два запроса имеют одно и то же поле с не-`Nullable` и `Nullable` совместимыми типами, полученные в результате `UNION ALL` данные будут иметь `Nullable` тип. - -Запросы, которые являются частью `UNION ALL` не могут быть заключен в круглые скобки. [ORDER BY](order-by.md) и [LIMIT](limit.md) применяются к отдельным запросам, а не к конечному результату. Если вам нужно применить преобразование к конечному результату, вы можете разместить все объединенные с помощью `UNION ALL` запросы в подзапрос в секции [FROM](from.md). - -## Ограничения {#limitations} - -Поддерживается только `UNION ALL`. Обычный `UNION` (`UNION DISTINCT`) не поддерживается. Если вам это нужно `UNION DISTINCT`, вы можете написать `SELECT DISTINCT` из подзапроса, содержащего `UNION ALL`. - -## Детали реализации {#implementation-details} - -Запросы, которые являются частью `UNION ALL` выполняются параллельно, и их результаты могут быть смешаны вместе. diff --git a/docs/ru/sql-reference/statements/select/union.md b/docs/ru/sql-reference/statements/select/union.md new file mode 100644 index 00000000000..8f1dc11c802 --- /dev/null +++ b/docs/ru/sql-reference/statements/select/union.md @@ -0,0 +1,81 @@ +--- +toc_title: UNION +--- + +# Секция UNION {#union-clause} + +Вы можете использовать `UNION` в двух режимах: `UNION ALL` или `UNION DISTINCT`. + +Если `UNION` используется без указания `ALL` или `DISTINCT`, то его поведение определяется настройкой `union_default_mode`. 
Разница между `UNION ALL` и `UNION DISTINCT` в том, что `UNION DISTINCT` выполняет явное преобразование для результата объединения. Это равнозначно выражению `SELECT DISTINCT` из подзапроса, содержащего `UNION ALL`. + +Чтобы объединить любое количество `SELECT` запросов путем объединения их результатов, вы можете использовать `UNION`. Пример: + +``` sql +SELECT CounterID, 1 AS table, toInt64(count()) AS c + FROM test.hits + GROUP BY CounterID + +UNION ALL + +SELECT CounterID, 2 AS table, sum(Sign) AS c + FROM test.visits + GROUP BY CounterID + HAVING c > 0 +``` + +Результирующие столбцы сопоставляются по их индексу (порядку внутри `SELECT`). Если имена столбцов не совпадают, то имена для конечного результата берутся из первого запроса. + +При объединении выполняет приведение типов. Например, если два запроса имеют одно и то же поле с не-`Nullable` и `Nullable` совместимыми типами, полученные в результате `UNION` данные будут иметь `Nullable` тип. + +Запросы, которые являются частью `UNION`, могут быть заключены в круглые скобки. [ORDER BY](order-by.md) и [LIMIT](limit.md) применяются к отдельным запросам, а не к конечному результату. Если вам нужно применить преобразование к конечному результату, вы можете разместить все объединенные с помощью `UNION` запросы в подзапрос в секции [FROM](from.md). + +Если используете `UNION` без явного указания `UNION ALL` или `UNION DISTINCT`, то вы можете указать режим объединения с помощью настройки [union_default_mode](../../../operations/settings/settings.md#union-default-mode), значениями которой могут быть `ALL`, `DISTINCT` или пустая строка. Однако если вы используете `UNION` с настройкой `union_default_mode`, значением которой является пустая строка, то будет сгенерировано исключение. В следующих примерах продемонстрированы результаты запросов при разных значениях настройки. 
+ +Запрос: + +```sql +SET union_default_mode = 'DISTINCT'; +SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2; +``` + +Результат: + +```text +┌─1─┐ +│ 1 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 3 │ +└───┘ +``` + +Запрос: + +```sql +SET union_default_mode = 'ALL'; +SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 2; +``` + +Результат: + +```text +┌─1─┐ +│ 1 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 2 │ +└───┘ +┌─1─┐ +│ 3 │ +└───┘ +``` + +Запросы, которые являются частью `UNION/UNION ALL/UNION DISTINCT`, выполняются параллельно, и их результаты могут быть смешаны вместе. + +[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/union/) diff --git a/docs/tr/sql-reference/statements/select/union-all.md b/docs/tr/sql-reference/statements/select/union-all.md deleted file mode 120000 index 837caae2698..00000000000 --- a/docs/tr/sql-reference/statements/select/union-all.md +++ /dev/null @@ -1 +0,0 @@ -../../../../en/sql-reference/statements/select/union-all.md \ No newline at end of file diff --git a/docs/tr/sql-reference/statements/select/union.md b/docs/tr/sql-reference/statements/select/union.md new file mode 100644 index 00000000000..0eb8db0be7a --- /dev/null +++ b/docs/tr/sql-reference/statements/select/union.md @@ -0,0 +1 @@ +../../../../en/sql-reference/statements/select/union.md \ No newline at end of file diff --git a/docs/zh/interfaces/formats.md b/docs/zh/interfaces/formats.md index 58d06916ed8..b37ef559aa7 100644 --- a/docs/zh/interfaces/formats.md +++ b/docs/zh/interfaces/formats.md @@ -1,179 +1,237 @@ -# 输入输出格式 {#formats} +--- +toc_priority: 21 +toc_title: 输入/输出格式 +--- -ClickHouse 可以接受多种数据格式,可以在 (`INSERT`) 以及 (`SELECT`) 请求中使用。 +# 输入/输出格式 {#formats} -下列表格列出了支持的数据格式以及在 (`INSERT`) 以及 (`SELECT`) 请求中使用它们的方式。 +ClickHouse可以接受和返回各种格式的数据。输入支持的格式可以用来解析提供给`INSERT`的数据,可以从文件备份表(如File, URL或HDFS)执行`SELECT`,或者读取外部字典。输出支持的格式可用于获取`SELECT`的结果,并支持执行`INSERT`文件的表中。 -| 格式 | INSERT | SELECT | 
-|-----------------------------------------------------------------|--------|--------| -| [TabSeparated](#tabseparated) | ✔ | ✔ | -| [TabSeparatedRaw](#tabseparatedraw) | ✗ | ✔ | -| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | -| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | -| [模板](#format-template) | ✔ | ✔ | -| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | -| [CSV](#csv) | ✔ | ✔ | -| [CSVWithNames](#csvwithnames) | ✔ | ✔ | -| [自定义分离](#format-customseparated) | ✔ | ✔ | -| [值](#data-format-values) | ✔ | ✔ | -| [垂直](#vertical) | ✗ | ✔ | -| VerticalRaw | ✗ | ✔ | -| [JSON](#json) | ✗ | ✔ | -| [JSONCompact](#jsoncompact) | ✗ | ✔ | -| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | -| [TSKV](#tskv) | ✔ | ✔ | -| [漂亮](#pretty) | ✗ | ✔ | -| [PrettyCompact](#prettycompact) | ✗ | ✔ | -| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | -| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | -| [PrettySpace](#prettyspace) | ✗ | ✔ | -| [Protobuf](#protobuf) | ✔ | ✔ | -| [Avro](#data-format-avro) | ✔ | ✔ | -| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | -| [镶木地板](#data-format-parquet) | ✔ | ✔ | -| [ORC](#data-format-orc) | ✔ | ✗ | -| [RowBinary](#rowbinary) | ✔ | ✔ | -| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | -| [本地人](#native) | ✔ | ✔ | -| [Null](#null) | ✗ | ✔ | -| [XML](#xml) | ✗ | ✔ | -| [CapnProto](#capnproto) | ✔ | ✔ | +以下是支持的格式: + +| 格式 | 输入 | 输出 | +|-----------------------------------------------------------------------------------------|-------|--------| +| [TabSeparated](#tabseparated) | ✔ | ✔ | +| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ | +| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ | +| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ | +| [Template](#format-template) | ✔ | ✔ | +| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ | +| [CSV](#csv) | ✔ | ✔ | +| [CSVWithNames](#csvwithnames) | ✔ | ✔ | +| 
[CustomSeparated](#format-customseparated) | ✔ | ✔ | +| [Values](#data-format-values) | ✔ | ✔ | +| [Vertical](#vertical) | ✗ | ✔ | +| [VerticalRaw](#verticalraw) | ✗ | ✔ | +| [JSON](#json) | ✗ | ✔ | +| [JSONAsString](#jsonasstring) | ✔ | ✗ | +| [JSONString](#jsonstring) | ✗ | ✔ | +| [JSONCompact](#jsoncompact) | ✗ | ✔ | +| [JSONCompactString](#jsoncompactstring) | ✗ | ✔ | +| [JSONEachRow](#jsoneachrow) | ✔ | ✔ | +| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ | +| [JSONStringEachRow](#jsonstringeachrow) | ✔ | ✔ | +| [JSONStringEachRowWithProgress](#jsonstringeachrowwithprogress) | ✗ | ✔ | +| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ | +| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ | +| [JSONCompactStringEachRow](#jsoncompactstringeachrow) | ✔ | ✔ | +| [JSONCompactStringEachRowWithNamesAndTypes](#jsoncompactstringeachrowwithnamesandtypes) | ✔ | ✔ | +| [TSKV](#tskv) | ✔ | ✔ | +| [Pretty](#pretty) | ✗ | ✔ | +| [PrettyCompact](#prettycompact) | ✗ | ✔ | +| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ | +| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ | +| [PrettySpace](#prettyspace) | ✗ | ✔ | +| [Protobuf](#protobuf) | ✔ | ✔ | +| [ProtobufSingle](#protobufsingle) | ✔ | ✔ | +| [Avro](#data-format-avro) | ✔ | ✔ | +| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ | +| [Parquet](#data-format-parquet) | ✔ | ✔ | +| [Arrow](#data-format-arrow) | ✔ | ✔ | +| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ | +| [ORC](#data-format-orc) | ✔ | ✗ | +| [RowBinary](#rowbinary) | ✔ | ✔ | +| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ | +| [Native](#native) | ✔ | ✔ | +| [Null](#null) | ✗ | ✔ | +| [XML](#xml) | ✗ | ✔ | +| [CapnProto](#capnproto) | ✔ | ✗ | +| [LineAsString](#lineasstring) | ✔ | ✗ | + +您可以使用ClickHouse设置控制一些格式处理参数。更多详情设置请参考[设置](../operations/settings/settings.md) ## TabSeparated {#tabseparated} -在 TabSeparated 格式中,数据按行写入。每行包含由制表符分隔的值。除了行中的最后一个值(后面紧跟换行符)之外,每个值都跟随一个制表符。 
在任何地方都可以使用严格的 Unix 命令行。最后一行还必须在最后包含换行符。值以文本格式编写,不包含引号,并且要转义特殊字符。 +在TabSeparated分隔格式中,数据按行写入。每行包含由制表符分隔的值。每个值后跟一个制表符,除了行中最后一个值后跟换行。在任何地方都采用严格的Unix换行。最后一行还必须在末尾包含换行。值以文本格式编写,不包含引号,并使用转义的特殊字符。 -这种格式也可以用 `TSV` 来表示。 +这种格式也可以用`TSV`来表示。 -TabSeparated 格式非常方便用于自定义程序或脚本处理数据。HTTP 客户端接口默认会用这种格式,命令行客户端批量模式下也会用这种格式。这种格式允许在不同数据库之间传输数据。例如,从 MYSQL 中导出数据然后导入到 ClickHouse 中,反之亦然。 +`TabSeparated`格式便于使用自定义程序和脚本处理数据。默认情况下,它在HTTP接口和命令行客户端的批处理模式中使用。这种格式还允许在不同dbms之间传输数据。例如,您可以从MySQL获取转储并将其上传到ClickHouse,反之亦然。 -TabSeparated 格式支持输出数据总值(当使用 WITH TOTALS) 以及极值(当 ‘extremes’ 设置是1)。这种情况下,总值和极值输出在主数据的后面。主要的数据,总值,极值会以一个空行隔开,例如: +`TabSeparated`格式支持输出total值(与TOTALS一起使用时)和extreme值(当`extreme`被设置为1时)。在这种情况下,total值和extreme值会在主数据后输出。主要结果、总值和极值之间用空行分隔。示例: ``` sql SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT TabSeparated`` ``` - 2014-03-17 1406958 - 2014-03-18 1383658 - 2014-03-19 1405797 - 2014-03-20 1353623 - 2014-03-21 1245779 - 2014-03-22 1031592 - 2014-03-23 1046491 +``` text +2014-03-17 1406958 +2014-03-18 1383658 +2014-03-19 1405797 +2014-03-20 1353623 +2014-03-21 1245779 +2014-03-22 1031592 +2014-03-23 1046491 - 1970-01-01 8873898 +1970-01-01 8873898 - 2014-03-17 1031592 - 2014-03-23 1406958 +2014-03-17 1031592 +2014-03-23 1406958 +``` -### 数据解析方式 {#shu-ju-jie-xi-fang-shi} +### 数据格式化 {#data-formatting} -整数以十进制形式写入。数字在开头可以包含额外的 `+` 字符(解析时忽略,格式化时不记录)。非负数不能包含负号。 读取时,允许将空字符串解析为零,或者(对于带符号的类型)将仅包含负号的字符串解析为零。 不符合相应数据类型的数字可能会被解析为不同的数字,而不会显示错误消息。 +整数是用十进制形式写的。数字可以在开头包含一个额外的`+`字符(解析时忽略,格式化时不记录)。非负数不能包含负号。在读取时,允许将空字符串解析为零,或者(对于有符号类型)将仅由一个负号组成的字符串解析为零。不符合相应数据类型的数字可以被解析为不同的数字,而不会出现错误消息。 -浮点数以十进制形式写入。点号用作小数点分隔符。支持指数等符号,如’inf’,‘+ inf’,‘-inf’和’nan’。 浮点数的输入可以以小数点开始或结束。 -格式化的时候,浮点数的精确度可能会丢失。 -解析的时候,没有严格需要去读取与机器可以表示的最接近的数值。 +浮点数以十进制形式书写。`.`号用作十进制分隔符。支持指数符号,如`inf`、`+inf`、`-inf`和`nan`。浮点数的条目可以以小数点开始或结束。 +在格式化期间,浮点数可能会丢失准确性。 +在解析期间,并不严格要求读取与机器可以表示的最接近的数值。 -日期会以 YYYY-MM-DD 格式写入和解析,但会以任何字符作为分隔符。 -带时间的日期会以 YYYY-MM-DD hh:mm:ss 格式写入和解析,但会以任何字符作为分隔符。 
-这一切都发生在客户端或服务器启动时的系统时区(取决于哪一种格式的数据)。对于具有时间的日期,夏时制时间未指定。 因此,如果转储在夏令时中有时间,则转储不会明确地匹配数据,解析将选择两者之一。 -在读取操作期间,不正确的日期和具有时间的日期可以使用自然溢出或空日期和时间进行分析,而不会出现错误消息。 +日期以YYYY-MM-DD格式编写,并以相同的格式解析,但使用任何字符作为分隔符。 +日期和时间以`YYYY-MM-DD hh:mm:ss`的格式书写,并以相同的格式解析,但使用任何字符作为分隔符。 +这一切都发生在客户端或服务器启动时的系统时区(取决于它们对数据的格式)。对于带有时间的日期,夏时制时间未指定。因此,如果转储在夏令时有时间,则转储不会明确地与数据匹配,解析将选择这两次中的一次。 +在读取操作期间,不正确的日期和具有时间的日期可以使用自然溢出或null日期和时间进行分析,而不会出现错误消息。 -有个例外情况,Unix 时间戳格式(10个十进制数字)也支持使用时间解析日期。结果不是时区相关的。格式 YYYY-MM-DD hh:mm:ss和 NNNNNNNNNN 会自动区分。 +有个例外情况,Unix时间戳格式也支持用时间解析日期(如果它恰好由10个十进制数字组成)。其结果与时间区域无关。格式`YYYY-MM-DD hh:mm:ss`和`NNNNNNNNNN`是自动区分的。 -字符串以反斜线转义的特殊字符输出。 以下转义序列用于输出:`\b`,`\f`,`\r`,`\n`,`\t`,`\0`,`\'`,`\\`。 解析还支持`\a`,`\v`和`\xHH`(十六进制转义字符)和任何`\c`字符,其中`c`是任何字符(这些序列被转换为`c`)。 因此,读取数据支持可以将换行符写为`\n`或`\`的格式,或者换行。例如,字符串 `Hello world` 在单词之间换行而不是空格可以解析为以下任何形式: +字符串以反斜杠转义的特殊字符输出。下面的转义序列用于输出:`\b`, `\f`, `\r`, `\n`, `\t`, `\0`, `\'`, `\\`。解析还支持`\a`、`\v`和`\xHH`(十六进制转义字符)和任何`\c`字符,其中`c`是任何字符(这些序列被转换为`c`)。因此,读取数据支持这样一种格式,即可以将换行符写成`\n`或`\`,或者写成换行符。例如,字符串`Hello world`在单词之间有换行符,而不是空格,可以用以下语法进行解析: - Hello\nworld +``` text +Hello\nworld - Hello\ - world +Hello\ +world +``` -第二种形式是支持的,因为 MySQL 读取 tab-separated 格式数据集的时候也会使用它。 +第二种形式是支持的,因为MySQL读取tab-separated格式数据集的时候也会使用它。 -在 TabSeparated 格式中传递数据时需要转义的最小字符集为:Tab,换行符(LF)和反斜杠。 +在TabSeparated分隔格式传递数据时需要转义的最小字符集:`Tab`、换行符(LF)和反斜杠。 -只有一小组符号会被转义。你可以轻易地找到一个字符串值,但这不会正常在你的终端显示。 +只有一小部分符号被转义。您可以很容易地找到一个字符串值,而您的终端将在输出中不显示它。 -数组写在方括号内的逗号分隔值列表中。 通常情况下,数组中的数字项目会被拼凑,但日期,带时间的日期以及字符串将使用与上面相同的转义规则用单引号引起来。 +数组写在方括号内的逗号分隔值列表中。数组中的数字项按正常格式进行格式化。`Date`和`DateTime`类型用单引号表示。字符串使用与上面相同的转义规则在单引号中编写。 -[NULL](../sql-reference/syntax.md) 将输出为 `\N`。 +[NULL](../sql-reference/syntax.md)将输出为`\N`。 + +[Nested](../sql-reference/data-types/nested-data-structures/nested.md)结构的每个元素都表示为数组。 + +示例: + +``` sql +CREATE TABLE nestedt +( + `id` UInt8, + `aux` Nested( + a UInt8, + b String + ) +) +ENGINE = TinyLog +``` + +``` sql +INSERT INTO nestedt Values ( 1, [1], ['a']) +``` + +``` sql +SELECT * FROM nestedt FORMAT TSV +``` 
+ +``` text +1 [1] ['a'] +``` ## TabSeparatedRaw {#tabseparatedraw} -与 `TabSeparated` 格式不一样的是,行数据是不会被转义的。 -该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。 +与`TabSeparated`格式的不同之处在于,写入的行没有转义。 +使用这种格式解析时,每个字段中不允许使用制表符或换行符。 -这种格式也可以使用名称 `TSVRaw` 来表示。 +这种格式也可以使用名称`TSVRaw`来表示。 ## TabSeparatedWithNames {#tabseparatedwithnames} -与 `TabSeparated` 格式不一样的是,第一行会显示列的名称。 -在解析过程中,第一行完全被忽略。您不能使用列名来确定其位置或检查其正确性。 -(未来可能会加入解析头行的功能) +与`TabSeparated`格式不同的是列名写在第一行。 +在解析过程中,第一行被完全忽略。不能使用列名来确定它们的位置或检查它们的正确性。 +(将来可能会添加对头行解析的支持。) -这种格式也可以使用名称 `TSVWithNames` 来表示。 +这种格式也可以使用名称`TSVWithNames`来表示。 ## TabSeparatedWithNamesAndTypes {#tabseparatedwithnamesandtypes} -与 `TabSeparated` 格式不一样的是,第一行会显示列的名称,第二行会显示列的类型。 -在解析过程中,第一行和第二行完全被忽略。 +与`TabSeparated`格式不同的是列名写在第一行,而列类型写在第二行。 +在解析过程中,将完全忽略第一行和第二行。 -这种格式也可以使用名称 `TSVWithNamesAndTypes` 来表示。 +这种格式也可以使用名称`TSVWithNamesAndTypes`来表示。 -## 模板 {#format-template} +## Template {#format-template} -此格式允许为具有指定转义规则的值指定带有占位符的自定义格式字符串。 +此格式允许指定带有占位符的自定义格式字符串,这些占位符用于指定转义规则。 -它使用设置 `format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter` and some settings of other formats (e.g. `output_format_json_quote_64bit_integers` 使用时 `JSON` 逃跑,进一步查看) +它使用设置`format_schema`, `format_schema_rows`, `format_schema_rows_between_delimiter`以及其他格式的一些设置(例如转义`JSON`时使用`output_format_json_quote_64bit_integers`) -格式字符串 `format_schema_rows` 使用以下语法指定行格式: +设置`format_template_row`指定文件的路径,该文件包含以下语法的行格式字符串: `delimiter_1${column_1:serializeAs_1}delimiter_2${column_2:serializeAs_2} ... delimiter_N`, - where `delimiter_i` is a delimiter between values (`$` symbol can be escaped as `$$`), - `column_i` is a name of a column whose values are to be selected or inserted (if empty, then column will be skipped), - `serializeAs_i` is an escaping rule for the column values. 
The following escaping rules are supported: +其中,`delimiter_i`是值之间的分隔符(`$`符号可以转义为`$$`), +`column_i`是要选择或插入其值的列的名称或索引(如果为空,则跳过该列), +`serializeAs_i`是列值的转义规则。支持以下转义规则: - - `CSV`, `JSON`, `XML` (similarly to the formats of the same names) - - `Escaped` (similarly to `TSV`) - - `Quoted` (similarly to `Values`) - - `Raw` (without escaping, similarly to `TSVRaw`) - - `None` (no escaping rule, see further) +- `CSV`, `JSON`, `XML` (类似于相同名称的格式) +- `Escaped` (类似于`TSV`) +- `Quoted` (类似于`Values`) +- `Raw` (类似于`TSVRaw`) +- `None` - If escaping rule is omitted, then`None` will be used. `XML` and `Raw` are suitable only for output. +如果省略了转义规则,那么将使用`None`。`XML`和`Raw`只适用于输出。 - So, for the following format string: +对于下面的格式字符串: `Search phrase: ${SearchPhrase:Quoted}, count: ${c:Escaped}, ad price: $$${price:JSON};` - the values of `SearchPhrase`, `c` and `price` columns, which are escaped as `Quoted`, `Escaped` and `JSON` will be printed (for select) or will be expected (for insert) between `Search phrase: `, `, count: `, `, ad price: $` and `;` delimiters respectively. For example: +`SearchPhrase`、`c`和`price`列的值被转义为`quotation`、`Escaped`和`JSON`将分别在`Search phrase:`, `, count: `, `, ad price: $`和`;`分隔符之间打印(用于选择)或expected(用于插入)。例如: - `Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` +`Search phrase: 'bathroom interior design', count: 2166, ad price: $3;` -该 `format_schema_rows_between_delimiter` setting指定行之间的分隔符,该分隔符在除最后一行之外的每一行之后打印(或预期) (`\n` 默认情况下) +`format_template_rows_between_delimiter`设置指定行之间的分隔符,它将打印(或expected)在每一行之后,最后一行除外(默认为`\n`)。 -格式字符串 `format_schema` 具有相同的语法 `format_schema_rows` 并允许指定前缀,后缀和打印一些附加信息的方式。 它包含以下占位符而不是列名: +设置`format_template_resultset`指定文件路径,该文件包含resultset的格式字符串。resultset的格式字符串与row的格式字符串具有相同的语法,允许指定前缀、后缀和打印一些附加信息的方法。它包含以下占位符而不是列名: -- `data` 包含数据的行 `format_schema_rows` 格式,由分隔 `format_schema_rows_between_delimiter`. 
此占位符必须是格式字符串中的第一个占位符。 -- `totals` 是包含总值的行 `format_schema_rows` 格式(与总计一起使用时) -- `min` 是具有最小值的行 `format_schema_rows` 格式(当极值设置为1时) -- `max` 是具有最大值的行 `format_schema_rows` 格式(当极值设置为1时) -- `rows` 输出行总数 -- `rows_before_limit` 是没有限制的最小行数。 仅当查询包含LIMIT时输出。 如果查询包含GROUP BY,则rows_before_limit_at_least是没有限制的确切行数。 -- `time` 请求执行时间以秒为单位 +- `data` `format_template_row`格式的数据行,由`format_template_rows_between_delimiter`分隔。此占位符必须是格式字符串中的第一个占位符。 +- `totals` `format_template_row`格式的总值(和WITH TOTALS一起使用) +- `min` `format_template_row`格式的最小值(当极值设置为1时) +- `max` `format_template_row`格式的最大值(当极值设置为1时) +- `rows` 输出行的总数 +- `rows_before_limit` 没有LIMIT的最小行数。仅当查询包含LIMIT时输出。如果查询包含GROUP BY,那么rows_before_limit_at_least就是没有LIMIT的确切行数。 +- `time` 请求执行时间(秒) - `rows_read` 已读取的行数 -- `bytes_read` 被读取的字节数(未压缩) +- `bytes_read` 已读取(未压缩)的字节数 -占位符 `data`, `totals`, `min` 和 `max` 必须没有指定转义规则(或 `None` 必须明确指定)。 其余的占位符可能具有指定的任何转义规则。 -如果 `format_schema` 设置为空字符串, `${data}` 用作默认值。 -对于插入查询格式允许跳过一些列或一些字段,如果前缀或后缀(见示例)。 +占位符`data`、`totals`、`min`和`max`必须没有指定转义规则(或者必须显式指定`None`)。其余占位符可以指定任何转义规则。 +如果`format_template_resultset`设置为空字符串,则使用`${data}`作为默认值。 +对于insert查询,格式允许跳过某些列或某些字段的前缀或后缀(参见示例)。 -`Select` 示例: +Select示例: ``` sql -SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 -FORMAT Template -SETTINGS format_schema = ' +SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase ORDER BY c DESC LIMIT 5 FORMAT Template SETTINGS +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = '\n ' +``` + +`/some/path/resultset.format`: + +``` text + Search phrases @@ -185,11 +243,17 @@ SETTINGS format_schema = '
Search phrases
Processed ${rows_read:XML} rows in ${time:XML} sec -', -format_schema_rows = ' ${SearchPhrase:XML} ${с:XML} ', -format_schema_rows_between_delimiter = '\n ' + ``` +`/some/path/row.format`: + +``` text + ${0:XML} ${1:XML} +``` + +结果: + ``` html Search phrases @@ -210,92 +274,122 @@ format_schema_rows_between_delimiter = '\n ' ``` -`Insert` 示例: +Insert示例: - Some header - Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 - Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 - Total rows: 2 +``` text +Some header +Page views: 5, User id: 4324182021466249494, Useless field: hello, Duration: 146, Sign: -1 +Page views: 6, User id: 4324182021466249494, Useless field: world, Duration: 185, Sign: 1 +Total rows: 2 +``` ``` sql INSERT INTO UserActivity FORMAT Template SETTINGS -format_schema = 'Some header\n${data}\nTotal rows: ${:CSV}\n', -format_schema_rows = 'Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV}' +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format' ``` -`PageViews`, `UserID`, `Duration` 和 `Sign` 占位符内部是表中列的名称。 值后 `Useless field` 在行和之后 `\nTotal rows:` in后缀将被忽略。 +`/some/path/resultset.format`: + +``` text +Some header\n${data}\nTotal rows: ${:CSV}\n +``` + +`/some/path/row.format`: + +``` text +Page views: ${PageViews:CSV}, User id: ${UserID:CSV}, Useless field: ${:CSV}, Duration: ${Duration:CSV}, Sign: ${Sign:CSV} +``` + +`PageViews`, `UserID`, `Duration`和`Sign` 内部占位符是表中列的名称。将忽略行中`Useless field`后面和后缀中`\nTotal rows:`之后的值。 输入数据中的所有分隔符必须严格等于指定格式字符串中的分隔符。 ## TemplateIgnoreSpaces {#templateignorespaces} -此格式仅适用于输入。 -类似于 `Template`,但跳过输入流中的分隔符和值之间的空格字符。 但是,如果格式字符串包含空格字符,则在输入流中将需要这些字符。 还允许指定空占位符 (`${}` 或 `${:None}`)将一些分隔符分成单独的部分,以忽略它们之间的空格。 此类占位符仅用于跳过空格字符。 -可以阅读 `JSON` 如果列的值在所有行中具有相同的顺序,则使用此格式。 例如,以下请求可用于从格式的输出示例中插入数据 [JSON](#json): +这种格式只适用于输入。 
+类似于`Template`,但跳过输入流中分隔符和值之间的空白字符。但是,如果格式字符串包含空格字符,这些字符将会出现在输入流中。还允许指定空占位符(`${}`或`${:None}`)来将一些分隔符分割为单独的部分,以忽略它们之间的空格。这种占位符仅用于跳过空白字符。 +如果列的值在所有行的顺序相同,那么可以使用这种格式读取`JSON`。可以使用以下请求从格式为[JSON](#json)的输出示例中插入数据: ``` sql INSERT INTO table_name FORMAT TemplateIgnoreSpaces SETTINGS -format_schema = '{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}}', -format_schema_rows = '{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}}', -format_schema_rows_between_delimiter = ',' +format_template_resultset = '/some/path/resultset.format', format_template_row = '/some/path/row.format', format_template_rows_between_delimiter = ',' +``` + +`/some/path/resultset.format`: + +``` text +{${}"meta"${}:${:JSON},${}"data"${}:${}[${data}]${},${}"totals"${}:${:JSON},${}"extremes"${}:${:JSON},${}"rows"${}:${:JSON},${}"rows_before_limit_at_least"${}:${:JSON}${}} +``` + +`/some/path/row.format`: + +``` text +{${}"SearchPhrase"${}:${}${phrase:JSON}${},${}"c"${}:${}${cnt:JSON}${}} ``` ## TSKV {#tskv} -与 `TabSeparated` 格式类似,但它输出的是 `name=value` 的格式。名称会和 `TabSeparated` 格式一样被转义,`=` 字符也会被转义。 +类似于TabSeparated,但是输出的值是name=value格式。名称的转义方式与TabSeparated格式相同,=符号也是转义的。 - SearchPhrase= count()=8267016 - SearchPhrase=bathroom interior design count()=2166 - SearchPhrase=yandex count()=1655 - SearchPhrase=2014 spring fashion count()=1549 - SearchPhrase=freeform photos count()=1480 - SearchPhrase=angelina jolie count()=1245 - SearchPhrase=omsk count()=1112 - SearchPhrase=photos of dog breeds count()=1091 - SearchPhrase=curtain designs count()=1064 - SearchPhrase=baku count()=1000 +``` text +SearchPhrase= count()=8267016 +SearchPhrase=bathroom interior design count()=2166 +SearchPhrase=yandex count()=1655 +SearchPhrase=2014 spring fashion count()=1549 +SearchPhrase=freeform photos count()=1480 +SearchPhrase=angelina jolie count()=1245 +SearchPhrase=omsk count()=1112 
+SearchPhrase=photos of dog breeds count()=1091 +SearchPhrase=curtain designs count()=1064 +SearchPhrase=baku count()=1000 +``` -[NULL](../sql-reference/syntax.md) 输出为 `\N`。 +[NULL](../sql-reference/syntax.md)格式为`\N`。 ``` sql SELECT * FROM t_null FORMAT TSKV ``` - x=1 y=\N +``` text +x=1 y=\N +``` -当有大量的小列时,这种格式是低效的,通常没有理由使用它。它被用于 Yandex 公司的一些部门。 +当有大量的小列时,这种格式是无效的,并且通常没有理由使用它。不过,就效率而言,它并不比JSONEachRow差。 +这种格式支持数据输出和解析。对于解析,不同列的值支持任何顺序。省略某些值是可以接受的——它们被视为与其默认值相等。在这种情况下,0和空白行被用作默认值。不支持在表中指定的复杂值作为缺省值。 -数据的输出和解析都支持这种格式。对于解析,任何顺序都支持不同列的值。可以省略某些值,用 `-` 表示, 它们被视为等于它们的默认值。在这种情况下,零和空行被用作默认值。作为默认值,不支持表中指定的复杂值。 - -对于不带等号或值,可以用附加字段 `tskv` 来表示,这种在解析上是被允许的。这样的话该字段被忽略。 +解析允许存在不带等号或值的附加字段`tskv`。此字段被忽略。 ## CSV {#csv} -按逗号分隔的数据格式([RFC](https://tools.ietf.org/html/rfc4180))。 +按`,`分隔的数据格式([RFC](https://tools.ietf.org/html/rfc4180))。 -格式化的时候,行是用双引号括起来的。字符串中的双引号会以两个双引号输出,除此之外没有其他规则来做字符转义了。日期和时间也会以双引号包括。数字的输出不带引号。值由一个单独的字符隔开,这个字符默认是 `,`。行使用 Unix 换行符(LF)分隔。 数组序列化成 CSV 规则如下:首先将数组序列化为 TabSeparated 格式的字符串,然后将结果字符串用双引号包括输出到 CSV。CSV 格式的元组被序列化为单独的列(即它们在元组中的嵌套关系会丢失)。 +格式化时,行是用双引号括起来的。字符串中的双引号会以两个双引号输出,除此之外没有其他规则来做字符转义了。日期和时间也会以双引号包括。数字的输出不带引号。值由一个单独的字符隔开,这个字符默认是`,`。行使用Unix换行符(LF)分隔。数组序列化成CSV规则如下:首先将数组序列化为`TabSeparated`格式的字符串,然后将结果字符串用双引号包括输出到`CSV`。`CSV`格式的元组被序列化为单独的列(即它们在元组中的嵌套关系会丢失)。 - clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` bash +$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv +``` -\*默认情况下间隔符是 `,` ,在 [format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter) 中可以了解更多间隔符配置。 +\* 默认情况下间隔符是`,` ,在[format_csv_delimiter](../operations/settings/settings.md#settings-format_csv_delimiter)中可以了解更多分隔符配置。 -解析的时候,可以使用或不使用引号来解析所有值。支持双引号和单引号。行也可以不用引号排列。 在这种情况下,它们被解析为逗号或换行符(CR 或 LF)。在解析不带引号的行时,若违反 RFC 规则,会忽略前导和尾随的空格和制表符。 对于换行,全部支持 Unix(LF),Windows(CR LF)和 Mac OS Classic(CR LF)。 
+解析的时候,可以使用或不使用引号来解析所有值。支持双引号和单引号。行也可以不用引号排列。在这种情况下,它们被解析为逗号或换行符(`CR或`LF`)。在解析不带引号的行时,若违反`RFC`规则,会忽略前缀和结尾的空格和制表符。对于换行,全部支持Unix(LF),Windows(CR LF)和Mac OS Classic(CR LF)。 -`NULL` 将输出为 `\N`。 +如果启用[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields),空的末尾加引号的输入值将替换为相应列的默认值。 -CSV 格式是和 TabSeparated 一样的方式输出总数和极值。 +`NULL`被格式化为`\N`或`NULL`或一个空的非引号字符串(详见配置[input_format_csv_unquoted_null_literal_as_null](../operations/settings/settings.md#settings-input_format_csv_unquoted_null_literal_as_null)或[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields))。 + +`CSV`格式支持输出总数和极值的方式与`TabSeparated`相同。 ## CSVWithNames {#csvwithnames} -会输出带头部行,和 `TabSeparatedWithNames` 一样。 +会输出带头部的信息(字段列表),和`TabSeparatedWithNames`一样。 -## 自定义分离 {#format-customseparated} +## CustomSeparated {#format-customseparated} -类似于 [模板](#format-template),但它打印或读取所有列,并使用从设置转义规则 `format_custom_escaping_rule` 从设置和分隔符 `format_custom_field_delimiter`, `format_custom_row_before_delimiter`, `format_custom_row_after_delimiter`, `format_custom_row_between_delimiter`, `format_custom_result_before_delimiter` 和 `format_custom_result_after_delimiter`,而不是从格式字符串。 -也有 `CustomSeparatedIgnoreSpaces` 格式,这是类似于 `TemplateIgnoreSpaces`. 
+类似于[Template](#format-template), 但它打印或读取所有列和使用转义规则在设置`format_custom_escaping_rule`和分隔符设置`format_custom_field_delimiter`,`format_custom_row_before_delimiter`,`format_custom_row_after_delimiter`,`format_custom_row_between_delimiter`,`format_custom_result_before_delimiter`,`format_custom_result_after_delimiter`中,而不是从格式字符串。 +也有`CustomSeparatedIgnoreSpaces`格式,这是类似于`TemplateIgnoreSpaces`。 ## JSON {#json} -以 JSON 格式输出数据。除了数据表之外,它还输出列名称和类型以及一些附加信息:输出行的总数以及在没有 LIMIT 时可以输出的行数。 例: +以JSON格式输出数据。除了数据表之外,它还输出列名和类型,以及一些附加信息: 输出行的总数,以及如果没有LIMIT的话可输出的行数。示例: ``` sql SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTALS ORDER BY c DESC LIMIT 5 FORMAT JSON @@ -306,166 +400,326 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA "meta": [ { - "name": "SearchPhrase", + "name": "'hello'", "type": "String" }, { - "name": "c", + "name": "multiply(42, number)", "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" } ], "data": [ { - "SearchPhrase": "", - "c": "8267016" + "'hello'": "hello", + "multiply(42, number)": "0", + "range(5)": [0,1,2,3,4] }, { - "SearchPhrase": "bathroom interior design", - "c": "2166" + "'hello'": "hello", + "multiply(42, number)": "42", + "range(5)": [0,1,2,3,4] }, { - "SearchPhrase": "yandex", - "c": "1655" - }, - { - "SearchPhrase": "spring 2014 fashion", - "c": "1549" - }, - { - "SearchPhrase": "freeform photos", - "c": "1480" + "'hello'": "hello", + "multiply(42, number)": "84", + "range(5)": [0,1,2,3,4] } ], - "totals": - { - "SearchPhrase": "", - "c": "8873898" - }, + "rows": 3, - "extremes": - { - "min": - { - "SearchPhrase": "", - "c": "1480" - }, - "max": - { - "SearchPhrase": "", - "c": "8267016" - } - }, - - "rows": 5, - - "rows_before_limit_at_least": 141137 + "rows_before_limit_at_least": 3 } ``` -JSON 与 JavaScript 兼容。为了确保这一点,一些字符被另外转义:斜线`/`被转义为`\/`; 替代的换行符 `U+2028` 和 `U+2029` 会打断一些浏览器解析,它们会被转义为 `\uXXXX`。 ASCII 控制字符被转义:退格,换页,换行,回车和水平制表符被替换为`\b`,`\f`,`\n`,`\r`,`\t` 
作为使用`\uXXXX`序列的00-1F范围内的剩余字节。 无效的 UTF-8 序列更改为替换字符 ,因此输出文本将包含有效的 UTF-8 序列。 为了与 JavaScript 兼容,默认情况下,Int64 和 UInt64 整数用双引号引起来。要除去引号,可以将配置参数 output_format_json_quote_64bit_integers 设置为0。 +JSON与JavaScript兼容。为了确保这一点,一些字符被另外转义:斜线`/`被转义为`\/`; 替代的换行符`U+2028`和`U+2029`会打断一些浏览器解析,它们会被转义为`\uXXXX`。 ASCII控制字符被转义:退格,换页,换行,回车和水平制表符被替换为`\b`,`\f`,`\n`,`\r`,`\t` 作为使用`\uXXXX`序列的00-1F范围内的剩余字节。 无效的UTF-8序列更改为替换字符,因此输出文本将包含有效的UTF-8序列。 为了与JavaScript兼容,默认情况下,Int64和UInt64整数用双引号引起来。要除去引号,可以将配置参数`output_format_json_quote_64bit_integers`设置为0。 `rows` – 结果输出的行数。 -`rows_before_limit_at_least` 去掉 LIMIT 过滤后的最小行总数。 只会在查询包含 LIMIT 条件时输出。 -若查询包含 GROUP BY,rows_before_limit_at_least 就是去掉 LIMIT 后过滤后的准确行数。 +`rows_before_limit_at_least`去掉 LIMIT过滤后的最小行总数。 只会在查询包含LIMIT条件时输出。 +若查询包含 GROUP BY,`rows_before_limit_at_least`就是去掉LIMIT后过滤后的准确行数。 -`totals` – 总值 (当使用 TOTALS 条件时)。 +`totals` – 总值 (当使用TOTALS条件时)。 -`extremes` – 极值 (当 extremes 设置为 1时)。 +`extremes` – 极值(当extremes设置为1时)。 该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。 -ClickHouse 支持 [NULL](../sql-reference/syntax.md), 在 JSON 格式中以 `null` 输出来表示. 
+ClickHouse支持[NULL](../sql-reference/syntax.md), 在JSON输出中显示为`null`。若要在输出中启用`+nan`、`-nan`、`+inf`、`-inf`值,请设置[output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals)为1。 -参考 JSONEachRow 格式。 +**参考** -## JSONCompact {#jsoncompact} +- [JSONEachRow](#jsoneachrow)格式 +- [output_format_json_array_of_rows](../operations/settings/settings.md#output-format-json-array-of-rows)设置 -与 JSON 格式不同的是它以数组的方式输出结果,而不是以结构体。 +## JSONString {#jsonstring} + +与JSON的不同之处在于数据字段以字符串输出,而不是以类型化JSON值输出。 示例: -``` json +```json { "meta": [ { - "name": "SearchPhrase", + "name": "'hello'", "type": "String" }, { - "name": "c", + "name": "multiply(42, number)", "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" } ], "data": [ - ["", "8267016"], - ["bathroom interior design", "2166"], - ["yandex", "1655"], - ["fashion trends spring 2014", "1549"], - ["freeform photo", "1480"] + { + "'hello'": "hello", + "multiply(42, number)": "0", + "range(5)": "[0,1,2,3,4]" + }, + { + "'hello'": "hello", + "multiply(42, number)": "42", + "range(5)": "[0,1,2,3,4]" + }, + { + "'hello'": "hello", + "multiply(42, number)": "84", + "range(5)": "[0,1,2,3,4]" + } ], - "totals": ["","8873898"], + "rows": 3, - "extremes": - { - "min": ["","1480"], - "max": ["","8267016"] - }, - - "rows": 5, - - "rows_before_limit_at_least": 141137 + "rows_before_limit_at_least": 3 } ``` -这种格式仅仅适用于输出结果集,而不适用于解析(将数据插入到表中)。 -参考 `JSONEachRow` 格式。 +## JSONAsString {#jsonasstring} -## JSONEachRow {#jsoneachrow} +在这种格式中,一个JSON对象被解释为一个值。如果输入有几个JSON对象(逗号分隔),它们将被解释为独立的行。 -将数据结果每一行以 JSON 结构体输出(换行分割 JSON 结构体)。 +这种格式只能对具有单个字段类型的表进行解析[String](../sql-reference/data-types/string.md)。其余的列必须设置为[DEFAULT](../sql-reference/statements/create.md)或[MATERIALIZED](../sql-reference/statements/create.md),或者忽略。一旦将整个JSON对象收集为字符串,就可以使用[JSON函数](../sql-reference/functions/json-functions.md)运行它。 -``` json -{"SearchPhrase":"","count()":"8267016"} -{"SearchPhrase": "bathroom interior 
design","count()": "2166"} -{"SearchPhrase":"yandex","count()":"1655"} -{"SearchPhrase":"2014 spring fashion","count()":"1549"} -{"SearchPhrase":"freeform photo","count()":"1480"} -{"SearchPhrase":"angelina jolie","count()":"1245"} -{"SearchPhrase":"omsk","count()":"1112"} -{"SearchPhrase":"photos of dog breeds","count()":"1091"} -{"SearchPhrase":"curtain designs","count()":"1064"} -{"SearchPhrase":"baku","count()":"1000"} +**示例** + +查询: + +``` sql +DROP TABLE IF EXISTS json_as_string; +CREATE TABLE json_as_string (json String) ENGINE = Memory; +INSERT INTO json_as_string FORMAT JSONAsString {"foo":{"bar":{"x":"y"},"baz":1}},{},{"any json stucture":1} +SELECT * FROM json_as_string; ``` -与 JSON 格式不同的是,没有替换无效的UTF-8序列。任何一组字节都可以在行中输出。这是必要的,因为这样数据可以被格式化而不会丢失任何信息。值的转义方式与JSON相同。 +结果: -对于解析,任何顺序都支持不同列的值。可以省略某些值 - 它们被视为等于它们的默认值。在这种情况下,零和空行被用作默认值。 作为默认值,不支持表中指定的复杂值。元素之间的空白字符被忽略。如果在对象之后放置逗号,它将被忽略。对象不一定必须用新行分隔。 +``` text +┌─json──────────────────────────────┐ +│ {"foo":{"bar":{"x":"y"},"baz":1}} │ +│ {} │ +│ {"any json stucture":1} │ +└───────────────────────────────────┘ +``` -### 嵌套结构的使用 {#jsoneachrow-nested} +## JSONCompact {#jsoncompact} +## JSONCompactString {#jsoncompactstring} -如果你有一张桌子 [嵌套式](../sql-reference/data-types/nested-data-structures/nested.md) 数据类型列,可以插入具有相同结构的JSON数据。 启用此功能与 [input_format_import_nested_json](../operations/settings/settings.md#settings-input_format_import_nested_json) 设置。 +与JSON格式不同的是它以数组的方式输出结果,而不是以结构体。 -例如,请考虑下表: +示例: + +``` json +// JSONCompact +{ + "meta": + [ + { + "name": "'hello'", + "type": "String" + }, + { + "name": "multiply(42, number)", + "type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" + } + ], + + "data": + [ + ["hello", "0", [0,1,2,3,4]], + ["hello", "42", [0,1,2,3,4]], + ["hello", "84", [0,1,2,3,4]] + ], + + "rows": 3, + + "rows_before_limit_at_least": 3 +} +``` + +```json +// JSONCompactString +{ + "meta": + [ + { + "name": "'hello'", + "type": "String" + }, + { + "name": "multiply(42, number)", + 
"type": "UInt64" + }, + { + "name": "range(5)", + "type": "Array(UInt8)" + } + ], + + "data": + [ + ["hello", "0", "[0,1,2,3,4]"], + ["hello", "42", "[0,1,2,3,4]"], + ["hello", "84", "[0,1,2,3,4]"] + ], + + "rows": 3, + + "rows_before_limit_at_least": 3 +} +``` + +## JSONEachRow {#jsoneachrow} +## JSONStringEachRow {#jsonstringeachrow} +## JSONCompactEachRow {#jsoncompacteachrow} +## JSONCompactStringEachRow {#jsoncompactstringeachrow} + +使用这些格式时,ClickHouse会将行输出为分隔的、换行分隔的JSON值,但数据作为一个整体不是有效的JSON。 + +``` json +{"some_int":42,"some_str":"hello","some_tuple":[1,"a"]} // JSONEachRow +[42,"hello",[1,"a"]] // JSONCompactEachRow +["42","hello","(2,'a')"] // JSONCompactStringsEachRow +``` + +在插入数据时,应该为每一行提供一个单独的JSON值。 + +## JSONEachRowWithProgress {#jsoneachrowwithprogress} +## JSONStringEachRowWithProgress {#jsonstringeachrowwithprogress} + +与`JSONEachRow`/`JSONStringEachRow`不同的是,ClickHouse还将生成作为JSON值的进度信息。 + +```json +{"row":{"'hello'":"hello","multiply(42, number)":"0","range(5)":[0,1,2,3,4]}} +{"row":{"'hello'":"hello","multiply(42, number)":"42","range(5)":[0,1,2,3,4]}} +{"row":{"'hello'":"hello","multiply(42, number)":"84","range(5)":[0,1,2,3,4]}} +{"progress":{"read_rows":"3","read_bytes":"24","written_rows":"0","written_bytes":"0","total_rows_to_read":"3"}} +``` + +## JSONCompactEachRowWithNamesAndTypes {#jsoncompacteachrowwithnamesandtypes} +## JSONCompactStringEachRowWithNamesAndTypes {#jsoncompactstringeachrowwithnamesandtypes} + +与`JSONCompactEachRow`/`JSONCompactStringEachRow`不同的是,其中列名和类型被写入前两行。 + +```json +["'hello'", "multiply(42, number)", "range(5)"] +["String", "UInt64", "Array(UInt8)"] +["hello", "0", [0,1,2,3,4]] +["hello", "42", [0,1,2,3,4]] +["hello", "84", [0,1,2,3,4]] +``` + +### Inserting Data {#inserting-data} + +``` sql +INSERT INTO UserActivity FORMAT JSONEachRow {"PageViews":5, "UserID":"4324182021466249494", "Duration":146,"Sign":-1} {"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} +``` + +ClickHouse允许: + +- 
对象中key-value的任何顺序。 +- 省略一些值。 + +ClickHouse忽略元素之间的空格和对象后面的逗号。您可以在一行中传递所有对象。你不需要用换行符把它们分开。 + +**省略值处理** + +ClickHouse将省略的值替换为对应的[data types](../sql-reference/data-types/index.md)默认值。 + +如果指定了`DEFAULT expr`,则ClickHouse根据属性使用不同的替换规则,详看[input_format_defaults_for_omitted_fields](../operations/settings/settings.md#session_settings-input_format_defaults_for_omitted_fields)设置。 + +参考下表: + +``` sql +CREATE TABLE IF NOT EXISTS example_table +( + x UInt32, + a DEFAULT x * 2 +) ENGINE = Memory; +``` + +- 如果`input_format_defaults_for_omitted_fields = 0`, 那么`x`和`a`的默认值等于`0`(作为`UInt32`数据类型的默认值)。 +- 如果`input_format_defaults_for_omitted_fields = 1`, 那么`x`的默认值为`0`,但`a`的默认值为`x * 2`。 + +!!! note "注意" +当使用`insert_sample_with_metadata = 1`插入数据时,与使用`insert_sample_with_metadata = 0`插入数据相比,ClickHouse消耗更多的计算资源。 + +### Selecting Data {#selecting-data} + +以`UserActivity`表为例: + +``` text +┌──────────────UserID─┬─PageViews─┬─Duration─┬─Sign─┐ +│ 4324182021466249494 │ 5 │ 146 │ -1 │ +│ 4324182021466249494 │ 6 │ 185 │ 1 │ +└─────────────────────┴───────────┴──────────┴──────┘ +``` + +当查询`SELECT * FROM UserActivity FORMAT JSONEachRow`返回: + +``` text +{"UserID":"4324182021466249494","PageViews":5,"Duration":146,"Sign":-1} +{"UserID":"4324182021466249494","PageViews":6,"Duration":185,"Sign":1} +``` + +与[JSON](#json)格式不同,没有替换无效的UTF-8序列。值以与`JSON`相同的方式转义。 + +!!! 
note "提示" +字符串中可以输出任意一组字节。如果您确信表中的数据可以被格式化为JSON而不会丢失任何信息,那么就使用`JSONEachRow`格式。 + +### Nested Structures {#jsoneachrow-nested} + +如果您有一个包含[Nested](../sql-reference/data-types/nested-data-structures/nested.md)数据类型列的表,您可以插入具有相同结构的JSON数据。使用[input_format_import_nested_json](../operations/settings/settings.md#settings-input_format_import_nested_json)设置启用该特性。 + +例如,请参考下表: ``` sql CREATE TABLE json_each_row_nested (n Nested (s String, i Int32) ) ENGINE = Memory ``` -正如你可以在找到 `Nested` 数据类型说明,ClickHouse将嵌套结构的每个组件视为单独的列, `n.s` 和 `n.i` 为了我们的桌子 所以你可以通过以下方式插入数据: +正如您在`Nested`数据类型描述中看到的,ClickHouse将嵌套结构的每个组件作为一个单独的列(`n.s`和`n.i`是我们的表)。您可以通过以下方式插入数据: ``` sql INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]} ``` -将数据作为分层JSON对象集插入 [input_format_import_nested_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json). +将数据作为分层JSON对象集插入[input_format_import_nested_json=1](../operations/settings/settings.md#settings-input_format_import_nested_json)。 ``` json { @@ -508,74 +762,93 @@ SELECT * FROM json_each_row_nested └───────────────┴────────┘ ``` -## 本地人 {#native} +## Native {#native} -最高性能的格式。 据通过二进制格式的块进行写入和读取。对于每个块,该块中的行数,列数,列名称和类型以及列的部分将被相继记录。 换句话说,这种格式是 «列式»的 - 它不会将列转换为行。 这是用于在服务器之间进行交互的本地界面中使用的格式,用于使用命令行客户端和 C++ 客户端。 +最高性能的格式。通过二进制格式的块进行写入和读取。对于每个块,该中的行数,列数,列名称和类型以及列的部分将被相继记录。 换句话说,这种格式是`columnar`的 - 它不会将列转换为行。这是用于在服务器之间进行交互的本地界面中使用的格式,用于使用命令行客户端和C++客户端。 -您可以使用此格式快速生成只能由 ClickHouse DBMS 读取的格式。但自己处理这种格式是没有意义的。 +您可以使用此格式快速生成只能由ClickHouse DBMS读取的格式。但自己处理这种格式是没有意义的。 ## Null {#null} -没有输出。但是,查询已处理完毕,并且在使用命令行客户端时,数据将传输到客户端。这仅用于测试,包括生产力测试。 +没有输出。但是,查询已处理完毕,并且在使用命令行客户端时,数据将传输到客户端。这仅用于测试,包括性能测试。 显然,这种格式只适用于输出,不适用于解析。 -## 漂亮 {#pretty} +## Pretty {#pretty} -将数据以表格形式输出,也可以使用 ANSI 转义字符在终端中设置颜色。 +将数据以表格形式输出,也可以使用ANSI转义字符在终端中设置颜色。 它会绘制一个完整的表格,每行数据在终端中占用两行。 -每一个结果块都会以单独的表格输出。这是很有必要的,以便结果块不用缓冲结果输出(缓冲在可以预见结果集宽度的时候是很有必要的)。 +每个结果块作为一个单独的表输出。这是必要的,以便在输出块时不需要缓冲结果(为了预先计算所有值的可见宽度,缓冲是必要的)。 -[NULL](../sql-reference/syntax.md) 输出为 `ᴺᵁᴸᴸ`。 
+[NULL](../sql-reference/syntax.md)输出为`ᴺᵁᴸᴸ`。 + +示例(显示[PrettyCompact](#prettycompact)格式) ``` sql SELECT * FROM t_null ``` - ┌─x─┬────y─┐ - │ 1 │ ᴺᵁᴸᴸ │ - └───┴──────┘ +``` text +┌─x─┬────y─┐ +│ 1 │ ᴺᵁᴸᴸ │ +└───┴──────┘ +``` -为避免将太多数据传输到终端,只打印前10,000行。 如果行数大于或等于10,000,则会显示消息«Showed first 10 000»。 +行没有转义为Pretty\* 格式。示例显示了[PrettyCompact](#prettycompact)格式: + +``` sql +SELECT 'String with \'quotes\' and \t character' AS Escaping_test +``` + +``` text +┌─Escaping_test────────────────────────┐ +│ String with 'quotes' and character │ +└──────────────────────────────────────┘ +``` + +为避免将太多数据传输到终端,只打印前10,000行。 如果行数大于或等于10,000,则会显示消息`Showed first 10 000`。 该格式仅适用于输出查询结果,但不适用于解析输入(将数据插入到表中)。 -Pretty格式支持输出总值(当使用 WITH TOTALS 时)和极值(当 `extremes` 设置为1时)。 在这些情况下,总数值和极值在主数据之后以单独的表格形式输出。 示例(以 PrettyCompact 格式显示): +Pretty格式支持输出合计值(当使用WITH TOTALS时)和极值(当`extremes`设置为1时)。在这些情况下,合计值和极值将输出在主要数据之后,在单独的表中。示例(显示为[PrettyCompact](#prettycompact)格式): ``` sql SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORDER BY EventDate FORMAT PrettyCompact ``` - ┌──EventDate─┬───────c─┐ - │ 2014-03-17 │ 1406958 │ - │ 2014-03-18 │ 1383658 │ - │ 2014-03-19 │ 1405797 │ - │ 2014-03-20 │ 1353623 │ - │ 2014-03-21 │ 1245779 │ - │ 2014-03-22 │ 1031592 │ - │ 2014-03-23 │ 1046491 │ - └────────────┴─────────┘ +``` text +┌──EventDate─┬───────c─┐ +│ 2014-03-17 │ 1406958 │ +│ 2014-03-18 │ 1383658 │ +│ 2014-03-19 │ 1405797 │ +│ 2014-03-20 │ 1353623 │ +│ 2014-03-21 │ 1245779 │ +│ 2014-03-22 │ 1031592 │ +│ 2014-03-23 │ 1046491 │ +└────────────┴─────────┘ - Totals: - ┌──EventDate─┬───────c─┐ - │ 1970-01-01 │ 8873898 │ - └────────────┴─────────┘ +Totals: +┌──EventDate─┬───────c─┐ +│ 1970-01-01 │ 8873898 │ +└────────────┴─────────┘ - Extremes: - ┌──EventDate─┬───────c─┐ - │ 2014-03-17 │ 1031592 │ - │ 2014-03-23 │ 1406958 │ - └────────────┴─────────┘ +Extremes: +┌──EventDate─┬───────c─┐ +│ 2014-03-17 │ 1031592 │ +│ 2014-03-23 │ 1406958 │ +└────────────┴─────────┘ +``` ## PrettyCompact 
{#prettycompact} -与 `Pretty` 格式不一样的是,`PrettyCompact` 去掉了行之间的表格分割线,这样使得结果更加紧凑。这种格式会在交互命令行客户端下默认使用。 +与[Pretty](#pretty)格式不一样的是`PrettyCompact`去掉了行之间的表格分割线,这样使得结果更加紧凑。 +这种格式会在交互命令行客户端下默认使用。 ## PrettyCompactMonoBlock {#prettycompactmonoblock} -与 `PrettyCompact` 格式不一样的是,它支持 10,000 行数据缓冲,然后输出在一个表格中,不会按照块来区分 +与[PrettyCompact](#prettycompact)格式不一样的是,它支持10,000行数据缓冲,然后输出在一个表格中,不会按照块来区分。 ## PrettyNoEscapes {#prettynoescapes} -与 `Pretty` 格式不一样的是,它不使用 ANSI 字符转义, 这在浏览器显示数据以及在使用 `watch` 命令行工具是有必要的。 +与`Pretty`格式不一样的是,它不使用ANSI字符转义,这在浏览器显示数据以及在使用`watch`命令行工具是有必要的。 示例: @@ -583,7 +856,7 @@ SELECT EventDate, count() AS c FROM test.hits GROUP BY EventDate WITH TOTALS ORD watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FORMAT PrettyCompactNoEscapes'" ``` -您可以使用 HTTP 接口来获取数据,显示在浏览器中。 +您可以使用HTTP接口来获取数据,显示在浏览器中。 ### PrettyCompactNoEscapes {#prettycompactnoescapes} @@ -593,9 +866,17 @@ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events FOR 用法类似上述。 +### PrettyCompactNoEscapes {#prettycompactnoescapes} + +与前面的设置相同。 + +### PrettySpaceNoEscapes {#prettyspacenoescapes} + +与前面的设置相同。 + ## PrettySpace {#prettyspace} -与 `PrettyCompact`(#prettycompact) 格式不一样的是,它使用空格来代替网格来显示数据。 +与[PrettyCompact](#prettycompact)格式不一样的是,它使用空格来代替网格来显示数据。 ## RowBinary {#rowbinary} diff --git a/docs/zh/interfaces/third-party/client-libraries.md b/docs/zh/interfaces/third-party/client-libraries.md index 77c929b9730..e94eb8bcfc0 100644 --- a/docs/zh/interfaces/third-party/client-libraries.md +++ b/docs/zh/interfaces/third-party/client-libraries.md @@ -1,51 +1,66 @@ -# 第三方开发的库 {#di-san-fang-kai-fa-de-ku} +--- +toc_priority: 26 +toc_title: 客户端开发库 +--- -!!! warning "放弃" - Yandex不维护下面列出的库,也没有进行任何广泛的测试以确保其质量。 +# 第三方开发库 {#client-libraries-from-third-party-developers} + +!!! 
warning "声明" +Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试来确保它们的质量。 - Python - [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm) - - [ツ环板driverョツ嘉ッツ偲](https://github.com/mymarilyn/clickhouse-driver) - - [ツ环板clientョツ嘉ッツ偲](https://github.com/yurial/clickhouse-client) + - [clickhouse-driver](https://github.com/mymarilyn/clickhouse-driver) + - [clickhouse-client](https://github.com/yurial/clickhouse-client) + - [aiochclient](https://github.com/maximdanilchenko/aiochclient) - PHP - [smi2/phpclickhouse](https://packagist.org/packages/smi2/phpClickHouse) - - [8bitov/clickhouse-php客户端](https://packagist.org/packages/8bitov/clickhouse-php-client) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://packagist.org/packages/bozerkins/clickhouse-client) - - [ツ环板clientョツ嘉ッツ偲](https://packagist.org/packages/simpod/clickhouse-client) + - [8bitov/clickhouse-php-client](https://packagist.org/packages/8bitov/clickhouse-php-client) + - [bozerkins/clickhouse-client](https://packagist.org/packages/bozerkins/clickhouse-client) + - [simpod/clickhouse-client](https://packagist.org/packages/simpod/clickhouse-client) - [seva-code/php-click-house-client](https://packagist.org/packages/seva-code/php-click-house-client) - - [ツ环板clientョツ嘉ッツ偲](https://github.com/SeasX/SeasClick) -- 走吧 + - [SeasClick C++ client](https://github.com/SeasX/SeasClick) + - [one-ck](https://github.com/lizhichao/one-ck) + - [glushkovds/phpclickhouse-laravel](https://packagist.org/packages/glushkovds/phpclickhouse-laravel) +- Go - [clickhouse](https://github.com/kshvakov/clickhouse/) - - [ツ环板-ョツ嘉ッツ偲](https://github.com/roistat/go-clickhouse) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/mailru/go-clickhouse) + - [go-clickhouse](https://github.com/roistat/go-clickhouse) + - [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse) - [golang-clickhouse](https://github.com/leprosus/golang-clickhouse) +- Swift + - [ClickHouseNIO](https://github.com/patrick-zippenfenig/ClickHouseNIO) + - [ClickHouseVapor 
ORM](https://github.com/patrick-zippenfenig/ClickHouseVapor) - NodeJs - - [ツ暗ェツ氾环催ツ団ツ法ツ人)](https://github.com/TimonKK/clickhouse) - - [ツ环板-ョツ嘉ッツ偲](https://github.com/apla/node-clickhouse) + - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse) + - [node-clickhouse](https://github.com/apla/node-clickhouse) - Perl - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse) - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://metacpan.org/release/AnyEvent-ClickHouse) + - [AnyEvent-ClickHouse](https://metacpan.org/release/AnyEvent-ClickHouse) - Ruby - - [ツ暗ェツ氾环催ツ団)](https://github.com/shlima/click_house) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/PNixx/clickhouse-activerecord) + - [ClickHouse (Ruby)](https://github.com/shlima/click_house) + - [clickhouse-activerecord](https://github.com/PNixx/clickhouse-activerecord) - R - [clickhouse-r](https://github.com/hannesmuehleisen/clickhouse-r) - - [RClickhouse](https://github.com/IMSMWU/RClickhouse) + - [RClickHouse](https://github.com/IMSMWU/RClickHouse) - Java - [clickhouse-client-java](https://github.com/VirtusAI/clickhouse-client-java) -- 斯卡拉 - - [掳胫client-禄脢鹿脷露胫鲁隆鹿-client酶](https://github.com/crobox/clickhouse-scala-client) + - [clickhouse-client](https://github.com/Ecwid/clickhouse-client) +- Scala + - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client) - Kotlin - [AORM](https://github.com/TanVD/AORM) - C# - [Octonica.ClickHouseClient](https://github.com/Octonica/ClickHouseClient) - - [克莱克豪斯Ado](https://github.com/killwort/ClickHouse-Net) + - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net) + - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client) - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) - - [克莱克豪斯客户](https://github.com/DarkWanderer/ClickHouse.Client) -- 仙丹 +- Elixir - [clickhousex](https://github.com/appodeal/clickhousex/) -- 尼姆 + - [pillar](https://github.com/sofakingworld/pillar) +- 
Nim - [nim-clickhouse](https://github.com/leonardoce/nim-clickhouse) +- Haskell + - [hdbc-clickhouse](https://github.com/zaneli/hdbc-clickhouse) -[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/client_libraries/) +[来源文章](https://clickhouse.tech/docs/en/interfaces/third-party/client_libraries/) diff --git a/docs/zh/interfaces/third-party/index.md b/docs/zh/interfaces/third-party/index.md index b55897073da..40206f6b57b 100644 --- a/docs/zh/interfaces/third-party/index.md +++ b/docs/zh/interfaces/third-party/index.md @@ -1,8 +1,16 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: "\u7B2C\u4E09\u65B9" +toc_folder_title: 第三方工具 toc_priority: 24 --- +# 第三方工具 {#third-party-interfaces} +这是第三方工具的链接集合,它们提供了一些ClickHouse的接口。它可以是可视化界面、命令行界面或API: + +- [Client libraries](../../interfaces/third-party/client-libraries.md) +- [Integrations](../../interfaces/third-party/integrations.md) +- [GUI](../../interfaces/third-party/gui.md) +- [Proxies](../../interfaces/third-party/proxy.md) + +!!! note "注意" +支持通用API的通用工具[ODBC](../../interfaces/odbc.md)或[JDBC](../../interfaces/jdbc.md),通常也适用于ClickHouse,但这里没有列出,因为它们实在太多了。 diff --git a/docs/zh/interfaces/third-party/integrations.md b/docs/zh/interfaces/third-party/integrations.md index 2c57e23b724..403ef994bb9 100644 --- a/docs/zh/interfaces/third-party/integrations.md +++ b/docs/zh/interfaces/third-party/integrations.md @@ -1,100 +1,108 @@ -# 第三方集成库 {#di-san-fang-ji-cheng-ku} +--- +toc_priority: 27 +toc_title: 第三方集成库 +--- + +# 第三方集成库 {#integration-libraries-from-third-party-developers} !!! 
warning "声明" - Yandex不维护下面列出的库,也没有进行任何广泛的测试以确保其质量。 +Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试来确保它们的质量。 -## 基建产品 {#ji-jian-chan-pin} +## 基础设施 {#infrastructure-products} -- 关系数据库管理系统 +- 关系数据库 - [MySQL](https://www.mysql.com) - [mysql2ch](https://github.com/long2ice/mysql2ch) - [ProxySQL](https://github.com/sysown/proxysql/wiki/ClickHouse-Support) - [clickhouse-mysql-data-reader](https://github.com/Altinity/clickhouse-mysql-data-reader) - - [horgh-复制器](https://github.com/larsnovikov/horgh-replicator) + - [horgh-replicator](https://github.com/larsnovikov/horgh-replicator) - [PostgreSQL](https://www.postgresql.org) - [clickhousedb_fdw](https://github.com/Percona-Lab/clickhousedb_fdw) - - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (使用 [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) + - [infi.clickhouse_fdw](https://github.com/Infinidat/infi.clickhouse_fdw) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) - [pg2ch](https://github.com/mkabilov/pg2ch) + - [clickhouse_fdw](https://github.com/adjust/clickhouse_fdw) - [MSSQL](https://en.wikipedia.org/wiki/Microsoft_SQL_Server) - - [ClickHouseMightrator](https://github.com/zlzforever/ClickHouseMigrator) + - [ClickHouseMigrator](https://github.com/zlzforever/ClickHouseMigrator) - 消息队列 - - [卡夫卡](https://kafka.apache.org) - - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (使用 [去客户](https://github.com/ClickHouse/clickhouse-go/)) + - [Kafka](https://kafka.apache.org) + - [clickhouse_sinker](https://github.com/housepower/clickhouse_sinker) (uses [Go client](https://github.com/ClickHouse/clickhouse-go/)) - [stream-loader-clickhouse](https://github.com/adform/stream-loader) - 流处理 - [Flink](https://flink.apache.org) - [flink-clickhouse-sink](https://github.com/ivi-ru/flink-clickhouse-sink) - 对象存储 - [S3](https://en.wikipedia.org/wiki/Amazon_S3) - - [ツ环板backupョツ嘉ッツ偲](https://github.com/AlexAkulov/clickhouse-backup) + - 
[clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup) - 容器编排 - [Kubernetes](https://kubernetes.io) - - [clickhouse-操](https://github.com/Altinity/clickhouse-operator) + - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator) - 配置管理 - - [木偶](https://puppet.com) - - [ツ环板/ョツ嘉ッツ偲](https://forge.puppet.com/innogames/clickhouse) - - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse) -- 监控 - - [石墨](https://graphiteapp.org) + - [puppet](https://puppet.com) + - [innogames/clickhouse](https://forge.puppet.com/innogames/clickhouse) + - [mfedotov/clickhouse](https://forge.puppet.com/mfedotov/clickhouse) +- Monitoring + - [Graphite](https://graphiteapp.org) - [graphouse](https://github.com/yandex/graphouse) - - [ツ暗ェツ氾环催ツ団](https://github.com/lomik/carbon-clickhouse) + - - [ツ环板-ョツ嘉ッツ偲](https://github.com/lomik/graphite-clickhouse) - - [石墨-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) -优化静态分区 [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) 如果从规则 [汇总配置](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) 可以应用 + - [carbon-clickhouse](https://github.com/lomik/carbon-clickhouse) + + - [graphite-clickhouse](https://github.com/lomik/graphite-clickhouse) + - [graphite-ch-optimizer](https://github.com/innogames/graphite-ch-optimizer) - optimizes staled partitions in [\*GraphiteMergeTree](../../engines/table-engines/mergetree-family/graphitemergetree.md#graphitemergetree) if rules from [rollup configuration](../../engines/table-engines/mergetree-family/graphitemergetree.md#rollup-configuration) could be applied - [Grafana](https://grafana.com/) - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana) - - [普罗米修斯号](https://prometheus.io/) + - [Prometheus](https://prometheus.io/) - [clickhouse_exporter](https://github.com/f1yegor/clickhouse_exporter) - [PromHouse](https://github.com/Percona-Lab/PromHouse) - - 
[clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (用途 [去客户](https://github.com/kshvakov/clickhouse/)) + - [clickhouse_exporter](https://github.com/hot-wifi/clickhouse_exporter) (uses [Go client](https://github.com/kshvakov/clickhouse/)) - [Nagios](https://www.nagios.org/) - [check_clickhouse](https://github.com/exogroup/check_clickhouse/) - [check_clickhouse.py](https://github.com/innogames/igmonplugins/blob/master/src/check_clickhouse.py) - [Zabbix](https://www.zabbix.com) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/Altinity/clickhouse-zabbix-template) + - [clickhouse-zabbix-template](https://github.com/Altinity/clickhouse-zabbix-template) - [Sematext](https://sematext.com/) - - [clickhouse积分](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse) -- 记录 + - [clickhouse integration](https://github.com/sematext/sematext-agent-integrations/tree/master/clickhouse) +- Logging - [rsyslog](https://www.rsyslog.com/) - - [鹿茫house omhousee酶](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html) + - [omclickhouse](https://www.rsyslog.com/doc/master/configuration/modules/omclickhouse.html) - [fluentd](https://www.fluentd.org) - - [loghouse](https://github.com/flant/loghouse) (对于 [Kubernetes](https://kubernetes.io)) - - [Sematext](https://www.sematext.com/logagent) - - [logagent输出-插件-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/) -- 地理 + - [loghouse](https://github.com/flant/loghouse) (for [Kubernetes](https://kubernetes.io)) + - [logagent](https://www.sematext.com/logagent) + - [logagent output-plugin-clickhouse](https://sematext.com/docs/logagent/output-plugin-clickhouse/) +- Geo - [MaxMind](https://dev.maxmind.com/geoip/) - - [ツ环板-ョツ嘉ッツ偲青clickシツ氾カツ鉄ツ工ツ渉](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip) + - [clickhouse-maxmind-geoip](https://github.com/AlexeyKupershtokh/clickhouse-maxmind-geoip) -## 编程语言生态系统 {#bian-cheng-yu-yan-sheng-tai-xi-tong} +## 编程语言 
{#programming-language-ecosystems} - Python - [SQLAlchemy](https://www.sqlalchemy.org) - - [ツ暗ェツ氾环催ツ団ツ法ツ人](https://github.com/cloudflare/sqlalchemy-clickhouse) (使用 [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) - - [熊猫](https://pandas.pydata.org) + - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm)) + - [pandas](https://pandas.pydata.org) - [pandahouse](https://github.com/kszucs/pandahouse) - PHP - [Doctrine](https://www.doctrine-project.org/) - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse) - R - [dplyr](https://db.rstudio.com/dplyr/) - - [RClickhouse](https://github.com/IMSMWU/RClickhouse) (使用 [ツ暗ェツ氾环催ツ団](https://github.com/artpaul/clickhouse-cpp)) + - [RClickHouse](https://github.com/IMSMWU/RClickHouse) (uses [clickhouse-cpp](https://github.com/artpaul/clickhouse-cpp)) - Java - [Hadoop](http://hadoop.apache.org) - - [clickhouse-hdfs-装载机](https://github.com/jaykelin/clickhouse-hdfs-loader) (使用 [JDBC](../../sql-reference/table-functions/jdbc.md)) -- 斯卡拉 + - [clickhouse-hdfs-loader](https://github.com/jaykelin/clickhouse-hdfs-loader) (uses [JDBC](../../sql-reference/table-functions/jdbc.md)) +- Scala - [Akka](https://akka.io) - - [掳胫client-禄脢鹿脷露胫鲁隆鹿-client酶](https://github.com/crobox/clickhouse-scala-client) + - [clickhouse-scala-client](https://github.com/crobox/clickhouse-scala-client) - C# - [ADO.NET](https://docs.microsoft.com/en-us/dotnet/framework/data/adonet/ado-net-overview) - - [克莱克豪斯Ado](https://github.com/killwort/ClickHouse-Net) - - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) - - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations) -- 仙丹 + - [ClickHouse.Ado](https://github.com/killwort/ClickHouse-Net) + - [ClickHouse.Client](https://github.com/DarkWanderer/ClickHouse.Client) + - [ClickHouse.Net](https://github.com/ilyabreev/ClickHouse.Net) 
+ - [ClickHouse.Net.Migrations](https://github.com/ilyabreev/ClickHouse.Net.Migrations) +- Elixir - [Ecto](https://github.com/elixir-ecto/ecto) - - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto) + - [clickhouse_ecto](https://github.com/appodeal/clickhouse_ecto) - Ruby - [Ruby on Rails](https://rubyonrails.org/) - [activecube](https://github.com/bitquery/activecube) + - [ActiveRecord](https://github.com/PNixx/clickhouse-activerecord) - [GraphQL](https://github.com/graphql) - [activecube-graphql](https://github.com/bitquery/activecube-graphql) -[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/integrations/) +[源文章](https://clickhouse.tech/docs/en/interfaces/third-party/integrations/) diff --git a/docs/zh/interfaces/third-party/proxy.md b/docs/zh/interfaces/third-party/proxy.md index 798717c0602..a2050863f30 100644 --- a/docs/zh/interfaces/third-party/proxy.md +++ b/docs/zh/interfaces/third-party/proxy.md @@ -1,37 +1,44 @@ -# 来自第三方开发人员的代理服务器 {#lai-zi-di-san-fang-kai-fa-ren-yuan-de-dai-li-fu-wu-qi} +--- +toc_priority: 29 +toc_title: 第三方代理 +--- -[chproxy](https://github.com/Vertamedia/chproxy) 是ClickHouse数据库的http代理和负载均衡器。 +# 第三方代理 {#proxy-servers-from-third-party-developers} -特征 +## chproxy {#chproxy} -*每用户路由和响应缓存。 -*灵活的限制。 -\*自动SSL证书续订。 +[chproxy](https://github.com/Vertamedia/chproxy), 是一个用于ClickHouse数据库的HTTP代理和负载均衡器。 -在Go中实现。 +特性: + +- 用户路由和响应缓存。 +- 灵活的限制。 +- 自动SSL证书续订。 + +使用go语言实现。 ## KittenHouse {#kittenhouse} -[KittenHouse](https://github.com/VKCOM/kittenhouse) 设计为ClickHouse和应用程序服务器之间的本地代理,以防在应用程序端缓冲INSERT数据是不可能或不方便的。 +[KittenHouse](https://github.com/VKCOM/kittenhouse)被设计为ClickHouse和应用服务器之间的本地代理,以防不可能或不方便在应用程序端缓冲插入数据。 -特征: +特性: -*内存和磁盘数据缓冲。 -*每表路由。 -\*负载平衡和健康检查。 +- 内存和磁盘上的数据缓冲。 +- 表路由。 +- 负载平衡和运行状况检查。 -在Go中实现。 +使用go语言实现。 -## ツ环板-ョツ嘉ッツ偲 {#clickhouse-bulk} +## ClickHouse-Bulk {#clickhouse-bulk} -[ツ环板-ョツ嘉ッツ偲](https://github.com/nikepan/clickhouse-bulk) 是一个简单的ClickHouse插入收集器。 
+[ClickHouse-Bulk](https://github.com/nikepan/clickhouse-bulk)是一个简单的ClickHouse收集器。 -特征: +特性: -*分组请求并按阈值或间隔发送。 -*多个远程服务器。 -\*基本身份验证。 +- 按阈值或间隔对请求进行分组并发送。 +- 多个远程服务器。 +- 基本身份验证。 -在Go中实现。 +使用go语言实现。 -[来源文章](https://clickhouse.tech/docs/zh/interfaces/third-party/proxy/) +[Original article](https://clickhouse.tech/docs/en/interfaces/third-party/proxy/) diff --git a/docs/zh/sql-reference/statements/select/index.md b/docs/zh/sql-reference/statements/select/index.md index cdfd64ff190..689a4f91a0c 100644 --- a/docs/zh/sql-reference/statements/select/index.md +++ b/docs/zh/sql-reference/statements/select/index.md @@ -46,7 +46,7 @@ SELECT [DISTINCT] expr_list - [SELECT 子句](#select-clause) - [DISTINCT 子句](../../../sql-reference/statements/select/distinct.md) - [LIMIT 子句](../../../sql-reference/statements/select/limit.md) -- [UNION ALL 子句](../../../sql-reference/statements/select/union-all.md) +- [UNION ALL 子句](../../../sql-reference/statements/select/union.md) - [INTO OUTFILE 子句](../../../sql-reference/statements/select/into-outfile.md) - [FORMAT 子句](../../../sql-reference/statements/select/format.md) diff --git a/docs/zh/sql-reference/statements/select/union-all.md b/docs/zh/sql-reference/statements/select/union.md similarity index 97% rename from docs/zh/sql-reference/statements/select/union-all.md rename to docs/zh/sql-reference/statements/select/union.md index d32ae896f55..1d88f9674c8 100644 --- a/docs/zh/sql-reference/statements/select/union-all.md +++ b/docs/zh/sql-reference/statements/select/union.md @@ -2,7 +2,7 @@ toc_title: UNION ALL --- -# UNION ALL子句 {#union-all-clause} +# UNION ALL子句 {#union-clause} 你可以使用 `UNION ALL` 结合任意数量的 `SELECT` 来扩展其结果。 示例: diff --git a/docs/zh/whats-new/index.md b/docs/zh/whats-new/index.md index 0f248773402..1c28a538eed 100644 --- a/docs/zh/whats-new/index.md +++ b/docs/zh/whats-new/index.md @@ -1,8 +1,8 @@ --- -machine_translated: true -machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd -toc_folder_title: 
"\u65B0\u589E\u5185\u5BB9" -toc_priority: 72 +toc_folder_title: ClickHouse事迹 +toc_priority: 82 --- +# ClickHouse变更及改动? {#whats-new-in-clickhouse} +对于已经发布的版本,有一个[roadmap](../whats-new/roadmap.md)和一个详细的[changelog](../whats-new/changelog/index.md)。 diff --git a/docs/zh/whats-new/roadmap.md b/docs/zh/whats-new/roadmap.md index 377746efcb7..df4b027f7da 100644 --- a/docs/zh/whats-new/roadmap.md +++ b/docs/zh/whats-new/roadmap.md @@ -1,17 +1,10 @@ --- toc_priority: 74 -toc_title: 路线图 +toc_title: Roadmap --- -# 路线图 {#roadmap} +# Roadmap {#roadmap} -## Q2 2020 {#q2-2020} - -- 和外部认证服务集成 - -## Q3 2020 {#q3-2020} - -- 资源池,为用户提供更精准的集群资源分配 - -{## [原始文档](https://clickhouse.tech/docs/en/roadmap/) ##} +`2021年Roadmap`已公布供公开讨论查看[这里](https://github.com/ClickHouse/ClickHouse/issues/17623). +{## [源文章](https://clickhouse.tech/docs/en/roadmap/) ##} diff --git a/docs/zh/whats-new/security-changelog.md b/docs/zh/whats-new/security-changelog.md index f21158c7aed..9cf0ca0b08c 100644 --- a/docs/zh/whats-new/security-changelog.md +++ b/docs/zh/whats-new/security-changelog.md @@ -1,41 +1,74 @@ -## 修复于 ClickHouse Release 18.12.13, 2018-09-10 {#xiu-fu-yu-clickhouse-release-18-12-13-2018-09-10} +--- +toc_priority: 76 +toc_title: 安全更新日志 +--- + +## 修复于ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10} + +### CVE-2019-15024 {#cve-2019-15024} + +对ZooKeeper具有写访问权限并且可以运行ClickHouse所在网络上可用的自定义服务器的攻击者可以创建一个自定义的恶意服务器,该服务器将充当ClickHouse副本并在ZooKeeper中注册。当另一个副本将从恶意副本获取数据部分时,它可以强制clickhouse服务器写入文件系统上的任意路径。 + +作者:Yandex信息安全团队Eldar Zaitov + +### CVE-2019-16535 {#cve-2019-16535} + +解压算法中的OOB-read、OOB-write和整数下溢可以通过本机协议实现RCE或DoS。 + +作者: Yandex信息安全团队Eldar Zaitov + +### CVE-2019-16536 {#cve-2019-16536} + +恶意的经过身份验证的客户端可能会触发导致DoS的堆栈溢出。 + +作者: Yandex信息安全团队Eldar Zaitov + +## 修复于ClickHouse Release 19.13.6.1, 2019-09-20 {#fixed-in-clickhouse-release-19-13-6-1-2019-09-20} + +### CVE-2019-18657 {#cve-2019-18657} + +表函数`url`存在允许攻击者在请求中插入任意HTTP标头的漏洞。 + +作者: [Nikita 
Tikhomirov](https://github.com/NSTikhomirov) + +## 修复于ClickHouse Release 18.12.13, 2018-09-10 {#fixed-in-clickhouse-release-18-12-13-2018-09-10} ### CVE-2018-14672 {#cve-2018-14672} -加载CatBoost模型的功能,允许遍历路径并通过错误消息读取任意文件。 +加载CatBoost模型的函数允许路径遍历和通过错误消息读取任意文件。 -来源: Yandex信息安全团队的Andrey Krasichkov +作者:Yandex信息安全团队Andrey Krasichkov -## 修复于 ClickHouse Release 18.10.3, 2018-08-13 {#xiu-fu-yu-clickhouse-release-18-10-3-2018-08-13} +## 修复于ClickHouse Release 18.10.3, 2018-08-13 {#fixed-in-clickhouse-release-18-10-3-2018-08-13} ### CVE-2018-14671 {#cve-2018-14671} -unixODBC允许从文件系统加载任意共享对象,从而导致«远程执行代码»漏洞。 +unixODBC允许从文件系统加载任意共享对象,从而导致远程代码执行漏洞。 -来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov +作者:Yandex信息安全团队Andrey Krasichkov和Evgeny Sidorov -## 修复于 ClickHouse Release 1.1.54388, 2018-06-28 {#xiu-fu-yu-clickhouse-release-1-1-54388-2018-06-28} +## 修复于ClickHouse Release 1.1.54388, 2018-06-28 {#fixed-in-clickhouse-release-1-1-54388-2018-06-28} ### CVE-2018-14668 {#cve-2018-14668} -远程表函数功能允许在 «user», «password» 及 «default_database» 字段中使用任意符号,从而导致跨协议请求伪造攻击。 +`remote`表函数允许在`user`,`password`和`default_database`字段中使用任意符号,从而导致跨协议请求伪造攻击。 -来源:Yandex信息安全团队的Andrey Krasichkov +作者:Yandex信息安全团队Andrey Krasichkov -## 修复于 ClickHouse Release 1.1.54390, 2018-07-06 {#xiu-fu-yu-clickhouse-release-1-1-54390-2018-07-06} +## 修复于ClickHouse Release 1.1.54390, 2018-07-06 {#fixed-in-clickhouse-release-1-1-54390-2018-07-06} ### CVE-2018-14669 {#cve-2018-14669} -ClickHouse MySQL客户端启用了 «LOAD DATA LOCAL INFILE» 功能,该功能允许恶意MySQL数据库从连接的ClickHouse服务器读取任意文件。 +ClickHouse MySQL客户端启用了`LOAD DATA LOCAL INFILE`功能,允许恶意MySQL数据库从连接的ClickHouse服务器读取任意文件。 -来源:Yandex信息安全团队的Andrey Krasichkov和Evgeny Sidorov +作者:Yandex信息安全团队Andrey Krasichkov和Evgeny Sidorov -## 修复于 ClickHouse Release 1.1.54131, 2017-01-10 {#xiu-fu-yu-clickhouse-release-1-1-54131-2017-01-10} +## 修复于ClickHouse Release 1.1.54131, 2017-01-10 {#fixed-in-clickhouse-release-1-1-54131-2017-01-10} ### CVE-2018-14670 {#cve-2018-14670} -deb软件包中的错误配置可能导致使用未经授权的数据库。 +deb包中的错误配置可能导致未经授权使用数据库。 
-来源:英国国家网络安全中心(NCSC) +作者:英国国家网络安全中心(NCSC) -[来源文章](https://clickhouse.tech/docs/en/security_changelog/) +{## [Original article](https://clickhouse.tech/docs/en/security_changelog/) ##} diff --git a/programs/client/QueryFuzzer.cpp b/programs/client/QueryFuzzer.cpp index 53ede4a3d92..fe0b6a975ce 100644 --- a/programs/client/QueryFuzzer.cpp +++ b/programs/client/QueryFuzzer.cpp @@ -405,8 +405,8 @@ void QueryFuzzer::fuzz(ASTPtr & ast) if (fn->is_window_function) { - fuzzColumnLikeExpressionList(fn->window_partition_by); - fuzzOrderByList(fn->window_order_by); + fuzzColumnLikeExpressionList(fn->window_partition_by.get()); + fuzzOrderByList(fn->window_order_by.get()); } fuzz(fn->children); diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index da92efdf251..d4b706abf4d 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -6,8 +6,11 @@ #include #include #include +#include +#include #include #include +#include #include #include #include @@ -58,34 +61,41 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) } -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic ignored "-Wmissing-declarations" - int mainEntryClickHouseCompressor(int argc, char ** argv) { using namespace DB; + namespace po = boost::program_options; - boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); + po::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() ("help,h", "produce help message") + ("input", po::value()->value_name("INPUT"), "input file") + ("output", po::value()->value_name("OUTPUT"), "output file") ("decompress,d", "decompress") - ("block-size,b", boost::program_options::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") + ("offset-in-compressed-file", po::value()->default_value(0ULL), "offset to the compressed block (i.e. 
physical file offset)") + ("offset-in-decompressed-block", po::value()->default_value(0ULL), "offset to the decompressed block (i.e. virtual offset)") + ("block-size,b", po::value()->default_value(DBMS_DEFAULT_BUFFER_SIZE), "compress in blocks of specified size") ("hc", "use LZ4HC instead of LZ4") ("zstd", "use ZSTD instead of LZ4") - ("codec", boost::program_options::value>()->multitoken(), "use codecs combination instead of LZ4") - ("level", boost::program_options::value(), "compression level for codecs specified via flags") + ("codec", po::value>()->multitoken(), "use codecs combination instead of LZ4") + ("level", po::value(), "compression level for codecs specified via flags") ("none", "use no compression instead of LZ4") ("stat", "print block statistics of compressed data") ; - boost::program_options::variables_map options; - boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options); + po::positional_options_description positional_desc; + positional_desc.add("input", 1); + positional_desc.add("output", 1); + + po::variables_map options; + po::store(po::command_line_parser(argc, argv).options(desc).positional(positional_desc).run(), options); if (options.count("help")) { - std::cout << "Usage: " << argv[0] << " [options] < in > out" << std::endl; + std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl; + std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl; std::cout << desc << std::endl; - return 1; + return 0; } try @@ -132,25 +142,52 @@ int mainEntryClickHouseCompressor(int argc, char ** argv) codec = CompressionCodecFactory::instance().get(method_family, level); - ReadBufferFromFileDescriptor rb(STDIN_FILENO); - WriteBufferFromFileDescriptor wb(STDOUT_FILENO); + std::unique_ptr rb; + std::unique_ptr wb; + + if (options.count("input")) + rb = std::make_unique(options["input"].as()); + else + rb = std::make_unique(STDIN_FILENO); + + if (options.count("output")) + wb = 
std::make_unique(options["output"].as()); + else + wb = std::make_unique(STDOUT_FILENO); if (stat_mode) { /// Output statistic for compressed file. - checkAndWriteHeader(rb, wb); + checkAndWriteHeader(*rb, *wb); } else if (decompress) { /// Decompression - CompressedReadBuffer from(rb); - copyData(from, wb); + + size_t offset_in_compressed_file = options["offset-in-compressed-file"].as(); + size_t offset_in_decompressed_block = options["offset-in-decompressed-block"].as(); + + if (offset_in_compressed_file || offset_in_decompressed_block) + { + if (!options.count("input")) + { + throw DB::Exception("--offset-in-compressed-file/--offset-in-decompressed-block requires --input", DB::ErrorCodes::BAD_ARGUMENTS); + } + CompressedReadBufferFromFile compressed_file(options["input"].as(), 0, 0, 0); + compressed_file.seek(offset_in_compressed_file, offset_in_decompressed_block); + copyData(compressed_file, *wb); + } + else + { + CompressedReadBuffer from(*rb); + copyData(from, *wb); + } } else { /// Compression - CompressedWriteBuffer to(wb, codec, block_size); - copyData(rb, to); + CompressedWriteBuffer to(*wb, codec, block_size); + copyData(*rb, to); } } catch (...) diff --git a/programs/server/config.xml b/programs/server/config.xml index e05ef61eed7..2bdcbd47ccf 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -324,6 +324,9 @@ auth_dn_prefix, auth_dn_suffix - prefix and suffix used to construct the DN to bind to. Effectively, the resulting DN will be constructed as auth_dn_prefix + escape(user_name) + auth_dn_suffix string. Note, that this implies that auth_dn_suffix should usually have comma ',' as its first non-space character. + verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed + to be successfully authenticated for all consecutive requests without contacting the LDAP server. 
+ Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request. enable_tls - flag to trigger use of secure connection to the LDAP server. Specify 'no' for plain text (ldap://) protocol (not recommended). Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default). @@ -343,6 +346,7 @@ 636 uid= ,ou=users,dc=example,dc=com + 300 yes tls1.2 demand diff --git a/src/Access/Authentication.cpp b/src/Access/Authentication.cpp index d29e2f897e8..5fab2c92624 100644 --- a/src/Access/Authentication.cpp +++ b/src/Access/Authentication.cpp @@ -1,6 +1,5 @@ #include #include -#include #include #include @@ -49,7 +48,7 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const } -bool Authentication::isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const +bool Authentication::isCorrectPassword(const String & user_, const String & password_, const ExternalAuthenticators & external_authenticators) const { switch (type) { @@ -81,14 +80,7 @@ bool Authentication::isCorrectPassword(const String & password_, const String & } case LDAP_SERVER: - { - auto ldap_server_params = external_authenticators.getLDAPServerParams(server_name); - ldap_server_params.user = user_; - ldap_server_params.password = password_; - - LDAPSimpleAuthClient ldap_client(ldap_server_params); - return ldap_client.check(); - } + return external_authenticators.checkLDAPCredentials(server_name, user_, password_); case MAX_TYPE: break; diff --git a/src/Access/Authentication.h b/src/Access/Authentication.h index 38714339221..f98d2ed4679 100644 --- a/src/Access/Authentication.h +++ b/src/Access/Authentication.h @@ -88,8 +88,8 @@ public: void setServerName(const String & server_name_); /// Checks if the provided password is correct. Returns false if not. 
- /// User name and external authenticators' info are used only by some specific authentication type (e.g., LDAP_SERVER). - bool isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const; + /// User name and external authenticators are used by the specific authentication types only (e.g., LDAP_SERVER). + bool isCorrectPassword(const String & user_, const String & password_, const ExternalAuthenticators & external_authenticators) const; friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); } friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); } diff --git a/src/Access/ExternalAuthenticators.cpp b/src/Access/ExternalAuthenticators.cpp index 3ed1b21c3c2..81ab1af3b29 100644 --- a/src/Access/ExternalAuthenticators.cpp +++ b/src/Access/ExternalAuthenticators.cpp @@ -1,9 +1,13 @@ #include +#include #include #include #include #include +#include +#include + namespace DB { @@ -29,6 +33,7 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str const bool has_port = config.has(ldap_server_config + ".port"); const bool has_auth_dn_prefix = config.has(ldap_server_config + ".auth_dn_prefix"); const bool has_auth_dn_suffix = config.has(ldap_server_config + ".auth_dn_suffix"); + const bool has_verification_cooldown = config.has(ldap_server_config + ".verification_cooldown"); const bool has_enable_tls = config.has(ldap_server_config + ".enable_tls"); const bool has_tls_minimum_protocol_version = config.has(ldap_server_config + ".tls_minimum_protocol_version"); const bool has_tls_require_cert = config.has(ldap_server_config + ".tls_require_cert"); @@ -52,6 +57,9 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str if (has_auth_dn_suffix) params.auth_dn_suffix = config.getString(ldap_server_config + 
".auth_dn_suffix"); + if (has_verification_cooldown) + params.verification_cooldown = std::chrono::seconds{config.getUInt64(ldap_server_config + ".verification_cooldown")}; + if (has_enable_tls) { String enable_tls_lc_str = config.getString(ldap_server_config + ".enable_tls"); @@ -130,16 +138,28 @@ auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const Str return params; } -void parseAndAddLDAPServers(ExternalAuthenticators & external_authenticators, const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +} + +void ExternalAuthenticators::reset() { + std::scoped_lock lock(mutex); + ldap_server_params.clear(); + ldap_server_caches.clear(); +} + +void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) +{ + std::scoped_lock lock(mutex); + + reset(); + Poco::Util::AbstractConfiguration::Keys ldap_server_names; config.keys("ldap_servers", ldap_server_names); - for (const auto & ldap_server_name : ldap_server_names) { try { - external_authenticators.setLDAPServerParams(ldap_server_name, parseLDAPServer(config, ldap_server_name)); + ldap_server_params.insert_or_assign(ldap_server_name, parseLDAPServer(config, ldap_server_name)); } catch (...) 
{ @@ -148,35 +168,100 @@ void parseAndAddLDAPServers(ExternalAuthenticators & external_authenticators, co } } -} - -void ExternalAuthenticators::reset() +bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const String & user_name, const String & password) const { - std::scoped_lock lock(mutex); - ldap_server_params.clear(); -} + std::optional params; + std::size_t params_hash = 0; -void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log) -{ - std::scoped_lock lock(mutex); - reset(); - parseAndAddLDAPServers(*this, config, log); -} + { + std::scoped_lock lock(mutex); -void ExternalAuthenticators::setLDAPServerParams(const String & server, const LDAPServerParams & params) -{ - std::scoped_lock lock(mutex); - ldap_server_params.erase(server); - ldap_server_params[server] = params; -} + // Retrieve the server parameters. + const auto pit = ldap_server_params.find(server); + if (pit == ldap_server_params.end()) + throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS); -LDAPServerParams ExternalAuthenticators::getLDAPServerParams(const String & server) const -{ - std::scoped_lock lock(mutex); - auto it = ldap_server_params.find(server); - if (it == ldap_server_params.end()) - throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS); - return it->second; + params = pit->second; + params->user = user_name; + params->password = password; + params_hash = params->getCoreHash(); + + // Check the cache, but only if the caching is enabled at all. 
+ if (params->verification_cooldown > std::chrono::seconds{0}) + { + const auto cit = ldap_server_caches.find(server); + if (cit != ldap_server_caches.end()) + { + auto & cache = cit->second; + + const auto eit = cache.find(user_name); + if (eit != cache.end()) + { + const auto & entry = eit->second; + const auto last_check_period = std::chrono::steady_clock::now() - entry.last_successful_authentication_timestamp; + + if ( + // Forbid the initial values explicitly. + entry.last_successful_params_hash != 0 && + entry.last_successful_authentication_timestamp != std::chrono::steady_clock::time_point{} && + + // Check if we can safely "reuse" the result of the previous successful password verification. + entry.last_successful_params_hash == params_hash && + last_check_period >= std::chrono::seconds{0} && + last_check_period <= params->verification_cooldown + ) + { + return true; + } + + // Erase the entry, if expired. + if (last_check_period > params->verification_cooldown) + cache.erase(eit); + } + + // Erase the cache, if empty. + if (cache.empty()) + ldap_server_caches.erase(cit); + } + } + } + + LDAPSimpleAuthClient client(params.value()); + const auto result = client.check(); + const auto current_check_timestamp = std::chrono::steady_clock::now(); + + // Update the cache, but only if this is the latest check and the server is still configured in a compatible way. + if (result) + { + std::scoped_lock lock(mutex); + + // If the server was removed from the config while we were checking the password, we discard the current result. + const auto pit = ldap_server_params.find(server); + if (pit == ldap_server_params.end()) + return false; + + auto new_params = pit->second; + new_params.user = user_name; + new_params.password = password; + + // If the critical server params have changed while we were checking the password, we discard the current result. 
+ if (params_hash != new_params.getCoreHash()) + return false; + + auto & entry = ldap_server_caches[server][user_name]; + if (entry.last_successful_authentication_timestamp < current_check_timestamp) + { + entry.last_successful_params_hash = params_hash; + entry.last_successful_authentication_timestamp = current_check_timestamp; + } + else if (entry.last_successful_params_hash != params_hash) + { + // Somehow a newer check with different params/password succeeded, so the current result is obsolete and we discard it. + return false; + } + } + + return result; } } diff --git a/src/Access/ExternalAuthenticators.h b/src/Access/ExternalAuthenticators.h index 7502409d817..fa618c92b3f 100644 --- a/src/Access/ExternalAuthenticators.h +++ b/src/Access/ExternalAuthenticators.h @@ -3,9 +3,10 @@ #include #include +#include #include -#include #include +#include namespace Poco @@ -27,13 +28,23 @@ class ExternalAuthenticators public: void reset(); void setConfiguration(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log); + bool checkLDAPCredentials(const String & server, const String & user_name, const String & password) const; - void setLDAPServerParams(const String & server, const LDAPServerParams & params); - LDAPServerParams getLDAPServerParams(const String & server) const; +private: + struct LDAPCacheEntry + { + std::size_t last_successful_params_hash = 0; + std::chrono::steady_clock::time_point last_successful_authentication_timestamp; + }; + + using LDAPServerCache = std::unordered_map; // user name -> cache entry + using LDAPServerCaches = std::map; // server name -> cache + using LDAPServersParams = std::map; // server name -> params private: mutable std::recursive_mutex mutex; - std::map ldap_server_params; + LDAPServersParams ldap_server_params; + mutable LDAPServerCaches ldap_server_caches; }; } diff --git a/src/Access/IAccessStorage.cpp b/src/Access/IAccessStorage.cpp index 58821e7de4b..c68f5f55ef5 100644 --- a/src/Access/IAccessStorage.cpp +++ 
b/src/Access/IAccessStorage.cpp @@ -463,7 +463,7 @@ UUID IAccessStorage::loginImpl( bool IAccessStorage::isPasswordCorrectImpl(const User & user, const String & password, const ExternalAuthenticators & external_authenticators) const { - return user.authentication.isCorrectPassword(password, user.getName(), external_authenticators); + return user.authentication.isCorrectPassword(user.getName(), password, external_authenticators); } diff --git a/src/Access/LDAPParams.h b/src/Access/LDAPParams.h index eeadba6bc01..28dcc5fe50f 100644 --- a/src/Access/LDAPParams.h +++ b/src/Access/LDAPParams.h @@ -2,6 +2,8 @@ #include +#include + #include @@ -68,10 +70,26 @@ struct LDAPServerParams String user; String password; + std::chrono::seconds verification_cooldown{0}; + std::chrono::seconds operation_timeout{40}; std::chrono::seconds network_timeout{30}; std::chrono::seconds search_timeout{20}; std::uint32_t search_limit = 100; + + std::size_t getCoreHash() const + { + std::size_t seed = 0; + + boost::hash_combine(seed, host); + boost::hash_combine(seed, port); + boost::hash_combine(seed, auth_dn_prefix); + boost::hash_combine(seed, auth_dn_suffix); + boost::hash_combine(seed, user); + boost::hash_combine(seed, password); + + return seed; + } }; } diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index 4bba3d8f4eb..3809f84711e 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -96,7 +96,7 @@ ThreadStatus::~ThreadStatus() catch (const DB::Exception &) { /// It's a minor tracked memory leak here (not the memory itself but it's counter). - /// We've already allocated a little bit more then the limit and cannot track it in the thread memory tracker or its parent. + /// We've already allocated a little bit more than the limit and cannot track it in the thread memory tracker or its parent. 
} if (deleter) diff --git a/src/Core/ExternalResultDescription.cpp b/src/Core/ExternalResultDescription.cpp index 7165d73b7d0..4be80a352c7 100644 --- a/src/Core/ExternalResultDescription.cpp +++ b/src/Core/ExternalResultDescription.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -76,6 +77,8 @@ void ExternalResultDescription::init(const Block & sample_block_) types.emplace_back(ValueType::vtDecimal128, is_nullable); else if (typeid_cast *>(type)) types.emplace_back(ValueType::vtDecimal256, is_nullable); + else if (typeid_cast(type)) + types.emplace_back(ValueType::vtFixedString, is_nullable); else throw Exception{"Unsupported type " + type->getName(), ErrorCodes::UNKNOWN_TYPE}; } diff --git a/src/Core/ExternalResultDescription.h b/src/Core/ExternalResultDescription.h index f8ba2a6bba2..cc3b26ad841 100644 --- a/src/Core/ExternalResultDescription.h +++ b/src/Core/ExternalResultDescription.h @@ -30,7 +30,8 @@ struct ExternalResultDescription vtDecimal32, vtDecimal64, vtDecimal128, - vtDecimal256 + vtDecimal256, + vtFixedString }; Block sample_block; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b09e960da36..d8ba4f45b77 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -417,7 +417,7 @@ class IColumn; M(UInt64, multiple_joins_rewriter_version, 0, "Obsolete setting, does nothing. Will be removed after 2021-03-31", 0) \ M(Bool, enable_debug_queries, false, "Enabled debug queries, but now is obsolete", 0) \ M(Bool, allow_experimental_database_atomic, true, "Obsolete setting, does nothing. Will be removed after 2021-02-12", 0) \ - M(UnionMode, union_default_mode, UnionMode::DISTINCT, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0) \ + M(UnionMode, union_default_mode, UnionMode::Unspecified, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. 
If empty, query without Union Mode will throw exception.", 0) \ M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \ M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \ diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 4d8492f15a2..d2bbf6ec2fa 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -518,7 +518,7 @@ void IPAddressDictionary::loadData() { /// We format key attribute values here instead of filling with data from key_column /// because string representation can be normalized if bits beyond mask are set. - /// Also all IPv4 will be displayed as mapped IPv6 if threre are any IPv6. + /// Also all IPv4 will be displayed as mapped IPv6 if there are any IPv6. /// It's consistent with representation in table created with `ENGINE = Dictionary` from this dictionary. 
char str_buffer[48]; if (has_ipv6) diff --git a/src/Formats/MySQLBlockInputStream.cpp b/src/Formats/MySQLBlockInputStream.cpp index 2ff8e8e5fb2..026f688a67f 100644 --- a/src/Formats/MySQLBlockInputStream.cpp +++ b/src/Formats/MySQLBlockInputStream.cpp @@ -8,6 +8,7 @@ # include # include # include +# include # include # include # include @@ -110,6 +111,9 @@ namespace data_type.deserializeAsWholeText(column, buffer, FormatSettings{}); break; } + case ValueType::vtFixedString: + assert_cast(column).insertData(value.data(), value.size()); + break; } } diff --git a/src/Functions/ExtractString.h b/src/Functions/ExtractString.h index 8b88a5a7c37..aa0e1b04835 100644 --- a/src/Functions/ExtractString.h +++ b/src/Functions/ExtractString.h @@ -20,8 +20,7 @@ namespace DB // includes extracting ASCII ngram, UTF8 ngram, ASCII word and UTF8 word struct ExtractStringImpl { - // read a ASCII word - static ALWAYS_INLINE inline const UInt8 * readOneASCIIWord(const UInt8 *& pos, const UInt8 * end) + static ALWAYS_INLINE inline const UInt8 * readOneWord(const UInt8 *& pos, const UInt8 * end) { // jump separators while (pos < end && isUTF8Sep(*pos)) @@ -35,22 +34,6 @@ struct ExtractStringImpl return word_start; } - // read one UTF8 word from pos to word - static ALWAYS_INLINE inline const UInt8 * readOneUTF8Word(const UInt8 *& pos, const UInt8 * end) - { - // jump UTF8 separator - while (pos < end && isUTF8Sep(*pos)) - ++pos; - - // UTF8 word's character number - const UInt8 * word_start = pos; - - while (pos < end && !isUTF8Sep(*pos)) - readOneUTF8Code(pos, end); - - return word_start; - } - // we use ASCII non-alphanum character as UTF8 separator static ALWAYS_INLINE inline bool isUTF8Sep(const UInt8 c) { return c < 128 && !isAlphaNumericASCII(c); } diff --git a/src/Functions/FunctionsStringHash.cpp b/src/Functions/FunctionsStringHash.cpp index e389e2f7f98..e5ed5dc77c0 100644 --- a/src/Functions/FunctionsStringHash.cpp +++ b/src/Functions/FunctionsStringHash.cpp @@ -249,13 +249,7 @@ 
struct SimHashImpl // get first word shingle while (start < end && words.size() < shingle_size) { - const UInt8 * word_start; - - if constexpr (UTF8) - word_start = ExtractStringImpl::readOneUTF8Word(start, end); - else - word_start = ExtractStringImpl::readOneASCIIWord(start, end); - + const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length >= min_word_size) @@ -271,13 +265,7 @@ struct SimHashImpl size_t offset = 0; while (start < end) { - const UInt8 * word_start; - - if constexpr (UTF8) - word_start = ExtractStringImpl::readOneUTF8Word(start, end); - else - word_start = ExtractStringImpl::readOneASCIIWord(start, end); - + const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length < min_word_size) @@ -340,7 +328,7 @@ struct MinHashImpl { static constexpr size_t min_word_size = 4; - template + template struct Heap { void update(UInt64 hash, BytesRef ref, size_t limit) @@ -478,13 +466,7 @@ struct MinHashImpl // get first word shingle while (start < end && words.size() < shingle_size) { - const UInt8 * word_start; - - if constexpr (UTF8) - word_start = ExtractStringImpl::readOneUTF8Word(start, end); - else - word_start = ExtractStringImpl::readOneASCIIWord(start, end); - + const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; if (length >= min_word_size) @@ -506,12 +488,7 @@ struct MinHashImpl size_t offset = 0; while (start < end) { - const UInt8 * word_start; - - if constexpr (UTF8) - word_start = ExtractStringImpl::readOneUTF8Word(start, end); - else - word_start = ExtractStringImpl::readOneASCIIWord(start, end); + const UInt8 * word_start = ExtractStringImpl::readOneWord(start, end); size_t length = start - word_start; diff --git a/src/Functions/FunctionsStringHash.h b/src/Functions/FunctionsStringHash.h index 2bcb7fa1013..c09abc33319 100644 --- a/src/Functions/FunctionsStringHash.h +++ 
b/src/Functions/FunctionsStringHash.h @@ -77,7 +77,7 @@ public: { if constexpr (is_simhash) throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, - "Function {} expect no more then two arguments (text, shingle size), got {}", + "Function {} expect no more than two arguments (text, shingle size), got {}", getName(), arguments.size()); if (!isUnsignedInteger(arguments[2].type)) @@ -95,7 +95,7 @@ public: if (arguments.size() > 3) { throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, - "Function {} expect no more then three arguments (text, shingle size, num hashes), got {}", + "Function {} expect no more than three arguments (text, shingle size, num hashes), got {}", getName(), arguments.size()); } diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index c7bbc019518..00600bebf07 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -738,15 +738,26 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data & if (node.is_window_function) { // Also add columns from PARTITION BY and ORDER BY of window functions. - // Requiring a constant reference to a shared pointer to non-const AST - // doesn't really look sane, but the visitor does indeed require it. if (node.window_partition_by) { - visit(node.window_partition_by->clone(), data); + visit(node.window_partition_by, data); } if (node.window_order_by) { - visit(node.window_order_by->clone(), data); + visit(node.window_order_by, data); + } + + // Also manually add columns for arguments of the window function itself. + // ActionVisitor is written in such a way that this method must itself + // descend into all needed function children. Window functions can't have + // any special functions as argument, so the code below that handles + // special arguments is not needed. This is analogous to the + // appendWindowFunctionsArguments() in SelectQueryExpressionAnalyzer and + // partially duplicates its code. 
Probably we can remove most of the + // logic from that function, but I don't yet have it all figured out... + for (const auto & arg : node.arguments->children) + { + visit(arg, data); } // Don't need to do anything more for window functions here -- the diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 31c12490408..a80b7799a98 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -970,7 +970,9 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( ExpressionActionsChain::Step & step = chain.lastStep(aggregated_columns); // 1) Add actions for window functions and their arguments; - // 2) Mark the columns that are really required. + // 2) Mark the columns that are really required. We have to mark them as + // required because we finish the expression chain before processing the + // window functions. for (const auto & [_, w] : window_descriptions) { for (const auto & f : w.window_functions) @@ -981,41 +983,14 @@ void SelectQueryExpressionAnalyzer::appendWindowFunctionsArguments( getRootActionsNoMakeSet(f.function_node->clone(), true /* no_subqueries */, step.actions()); - // 1.2) result of window function: an empty INPUT. - // It is an aggregate function, so it won't be added by getRootActions. - // This is something of a hack. Other options: - // a] do it like aggregate function -- break the chain of actions - // and manually add window functions to the starting list of - // input columns. Logically this is similar to what we're doing - // now, but would require to split the window function processing - // into a full-fledged step after plain functions. This would be - // somewhat cumbersome. With INPUT hack we can avoid a separate - // step and pretend that window functions are almost "normal" - // select functions. The limitation of both these ways is that - // we can't reference window functions in other SELECT - // expressions. 
- // b] add a WINDOW action type, then sort, then split the chain on - // each WINDOW action and insert the Window pipeline between the - // Expression pipelines. This is a "proper" way that would allow - // us to depend on window functions in other functions. But it's - // complicated so I avoid doing it for now. - ColumnWithTypeAndName col; - col.type = f.aggregate_function->getReturnType(); - col.column = col.type->createColumn(); - col.name = f.column_name; - - step.actions()->addInput(col); - + // 2.1) function arguments; for (const auto & a : f.function_node->arguments->children) { - // 2.1) function arguments; step.required_output.push_back(a->getColumnName()); } - // 2.2) function result; - step.required_output.push_back(f.column_name); } - // 2.3) PARTITION BY and ORDER BY columns. + // 2.1) PARTITION BY and ORDER BY columns. for (const auto & c : w.full_sort_description) { step.required_output.push_back(c.column_name); @@ -1048,6 +1023,15 @@ void SelectQueryExpressionAnalyzer::appendSelect(ExpressionActionsChain & chain, for (const auto & child : select_query->select()->children) { + if (const auto * function = typeid_cast(child.get()); + function + && function->is_window_function) + { + // Skip window function columns here -- they are calculated after + // other SELECT expressions by a special step. + continue; + } + step.required_output.push_back(child->getColumnName()); } } @@ -1421,11 +1405,54 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( /// If there is aggregation, we execute expressions in SELECT and ORDER BY on the initiating server, otherwise on the source servers. query_analyzer.appendSelect(chain, only_types || (need_aggregate ? !second_stage : !first_stage)); - query_analyzer.appendWindowFunctionsArguments(chain, only_types || !first_stage); + // Window functions are processed in a separate expression chain after + // the main SELECT, similar to what we do for aggregate functions. 
+ if (has_window) + { + query_analyzer.appendWindowFunctionsArguments(chain, only_types || !first_stage); + + // Build a list of output columns of the window step. + // 1) We need the columns that are the output of ExpressionActions. + for (const auto & x : chain.getLastActions()->getNamesAndTypesList()) + { + query_analyzer.columns_after_window.push_back(x); + } + // 2) We also have to manually add the output of the window function + // to the list of the output columns of the window step, because the + // window functions are not in the ExpressionActions. + for (const auto & [_, w] : query_analyzer.window_descriptions) + { + for (const auto & f : w.window_functions) + { + query_analyzer.columns_after_window.push_back( + {f.column_name, f.aggregate_function->getReturnType()}); + } + } + + before_window = chain.getLastActions(); + finalize_chain(chain); + + auto & step = chain.lastStep(query_analyzer.columns_after_window); + + // The output of this expression chain is the result of + // SELECT (before "final projection" i.e. renaming the columns), so + // we have to mark the expressions that are required in the output, + // again. We did it for the previous expression chain ("select w/o + // window functions") earlier, in appendSelect(). But that chain also + // produced the expressions required to calculate window functions. + // They are not needed in the final SELECT result. Knowing the correct + // list of columns is important when we apply SELECT DISTINCT later. + const auto * select_query = query_analyzer.getSelectQuery(); + for (const auto & child : select_query->select()->children) + { + step.required_output.push_back(child->getColumnName()); + } + } selected_columns = chain.getLastStep().required_output; + has_order_by = query.orderBy() != nullptr; - before_order_and_select = query_analyzer.appendOrderBy( + before_order_by = query_analyzer.appendOrderBy( chain, only_types || (need_aggregate ? 
!second_stage : !first_stage), optimize_read_in_order, @@ -1572,9 +1599,9 @@ std::string ExpressionAnalysisResult::dump() const ss << "before_window " << before_window->dumpDAG() << "\n"; } - if (before_order_and_select) + if (before_order_by) { - ss << "before_order_and_select " << before_order_and_select->dumpDAG() << "\n"; + ss << "before_order_by " << before_order_by->dumpDAG() << "\n"; } if (before_limit_by) @@ -1587,6 +1614,20 @@ std::string ExpressionAnalysisResult::dump() const ss << "final_projection " << final_projection->dumpDAG() << "\n"; } + if (!selected_columns.empty()) + { + ss << "selected_columns "; + for (size_t i = 0; i < selected_columns.size(); i++) + { + if (i > 0) + { + ss << ", "; + } + ss << backQuote(selected_columns[i]); + } + ss << "\n"; + } + return ss.str(); } diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index fb0cb4ea4c3..ea43efa6036 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -55,6 +55,8 @@ struct ExpressionAnalyzerData NamesAndTypesList columns_after_join; /// Columns after ARRAY JOIN, JOIN, and/or aggregation. NamesAndTypesList aggregated_columns; + /// Columns after window functions. + NamesAndTypesList columns_after_window; bool has_aggregation = false; NamesAndTypesList aggregation_keys; @@ -202,11 +204,12 @@ struct ExpressionAnalysisResult ActionsDAGPtr before_aggregation; ActionsDAGPtr before_having; ActionsDAGPtr before_window; - ActionsDAGPtr before_order_and_select; + ActionsDAGPtr before_order_by; ActionsDAGPtr before_limit_by; ActionsDAGPtr final_projection; - /// Columns from the SELECT list, before renaming them to aliases. + /// Columns from the SELECT list, before renaming them to aliases. Used to + /// perform SELECT DISTINCT. Names selected_columns; /// Columns will be removed after prewhere actions execution. 
diff --git a/src/Interpreters/ExtractExpressionInfoVisitor.cpp b/src/Interpreters/ExtractExpressionInfoVisitor.cpp index 2d9339447b1..64c23cd4fd1 100644 --- a/src/Interpreters/ExtractExpressionInfoVisitor.cpp +++ b/src/Interpreters/ExtractExpressionInfoVisitor.cpp @@ -22,15 +22,22 @@ void ExpressionInfoMatcher::visit(const ASTFunction & ast_function, const ASTPtr { data.is_array_join = true; } - // "is_aggregate_function" doesn't mean much by itself. Apparently here it is - // used to move filters from HAVING to WHERE, and probably for this purpose - // an aggregate function calculated as a window function is not relevant. + // "is_aggregate_function" is used to determine whether we can move a filter + // (1) from HAVING to WHERE or (2) from WHERE of a parent query to HAVING of + // a subquery. + // For aggregate functions we can't do (1) but can do (2). + // For window functions both don't make sense -- they are not allowed in + // WHERE or HAVING. else if (!ast_function.is_window_function && AggregateFunctionFactory::instance().isAggregateFunctionName( ast_function.name)) { data.is_aggregate_function = true; } + else if (ast_function.is_window_function) + { + data.is_window_function = true; + } else { const auto & function = FunctionFactory::instance().tryGet(ast_function.name, data.context); @@ -75,15 +82,26 @@ bool ExpressionInfoMatcher::needChildVisit(const ASTPtr & node, const ASTPtr &) return !node->as(); } -bool hasStatefulFunction(const ASTPtr & node, const Context & context) +bool hasNonRewritableFunction(const ASTPtr & node, const Context & context) { for (const auto & select_expression : node->children) { ExpressionInfoVisitor::Data expression_info{.context = context, .tables = {}}; ExpressionInfoVisitor(expression_info).visit(select_expression); - if (expression_info.is_stateful_function) + if (expression_info.is_stateful_function + || expression_info.is_window_function) + { + // If an outer query has a WHERE on window function, we can't move + // it 
into the subquery, because window functions are not allowed in + // WHERE and HAVING. Example: + // select * from ( + // select number, + // count(*) over (partition by intDiv(number, 3)) c + // from numbers(3) + // ) where c > 1; return true; + } } return false; diff --git a/src/Interpreters/ExtractExpressionInfoVisitor.h b/src/Interpreters/ExtractExpressionInfoVisitor.h index 0cb43e5b00a..d05415490e6 100644 --- a/src/Interpreters/ExtractExpressionInfoVisitor.h +++ b/src/Interpreters/ExtractExpressionInfoVisitor.h @@ -21,6 +21,7 @@ struct ExpressionInfoMatcher bool is_array_join = false; bool is_stateful_function = false; bool is_aggregate_function = false; + bool is_window_function = false; bool is_deterministic_function = true; std::unordered_set unique_reference_tables_pos = {}; }; @@ -36,6 +37,6 @@ struct ExpressionInfoMatcher using ExpressionInfoVisitor = ConstInDepthNodeVisitor; -bool hasStatefulFunction(const ASTPtr & node, const Context & context); +bool hasNonRewritableFunction(const ASTPtr & node, const Context & context); } diff --git a/src/Interpreters/GetAggregatesVisitor.h b/src/Interpreters/GetAggregatesVisitor.h index d416a5f240e..266187aaffb 100644 --- a/src/Interpreters/GetAggregatesVisitor.h +++ b/src/Interpreters/GetAggregatesVisitor.h @@ -33,11 +33,14 @@ public: return false; if (auto * func = node->as()) { - if (isAggregateFunction(*func) - || func->is_window_function) + if (isAggregateFunction(*func)) { return false; } + + // Window functions can contain aggregation results as arguments + // to the window functions, or columns of PARTITION BY or ORDER BY + // of the window. 
} return true; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 38cc19a00d6..dd9df3f36a0 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -211,6 +211,18 @@ static void rewriteMultipleJoins(ASTPtr & query, const TablesWithColumns & table JoinToSubqueryTransformVisitor(join_to_subs_data).visit(query); } +/// Returns true if we should ignore quotas and limits for a specified table in the system database. +static bool shouldIgnoreQuotaAndLimits(const StorageID & table_id) +{ + if (table_id.database_name == DatabaseCatalog::SYSTEM_DATABASE) + { + static const boost::container::flat_set tables_ignoring_quota{"quotas", "quota_limits", "quota_usage", "quotas_usage", "one"}; + if (tables_ignoring_quota.count(table_id.table_name)) + return true; + } + return false; +} + InterpreterSelectQuery::InterpreterSelectQuery( const ASTPtr & query_ptr_, const Context & context_, @@ -255,14 +267,18 @@ InterpreterSelectQuery::InterpreterSelectQuery( JoinedTables joined_tables(getSubqueryContext(*context), getSelectQuery()); + bool got_storage_from_query = false; if (!has_input && !storage) + { storage = joined_tables.getLeftTableStorage(); + got_storage_from_query = true; + } if (storage) { table_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); table_id = storage->getStorageID(); - if (metadata_snapshot == nullptr) + if (!metadata_snapshot) metadata_snapshot = storage->getInMemoryMetadataPtr(); } @@ -280,9 +296,10 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (storage && joined_tables.isLeftTableSubquery()) { /// Rewritten with subquery. Free storage locks here. 
- storage = {}; + storage = nullptr; table_lock.reset(); table_id = StorageID::createEmpty(); + metadata_snapshot = nullptr; } } @@ -445,16 +462,14 @@ InterpreterSelectQuery::InterpreterSelectQuery( if (query.prewhere() && !query.where()) analysis_result.prewhere_info->need_filter = true; - const StorageID & left_table_id = joined_tables.leftTableID(); - - if (left_table_id) - context->checkAccess(AccessType::SELECT, left_table_id, required_columns); - - /// Remove limits for some tables in the `system` database. - if (left_table_id.database_name == "system") + if (table_id && got_storage_from_query && !joined_tables.isLeftTableFunction()) { - static const boost::container::flat_set system_tables_ignoring_quota{"quotas", "quota_limits", "quota_usage", "quotas_usage", "one"}; - if (system_tables_ignoring_quota.count(left_table_id.table_name)) + /// The current user should have the SELECT privilege. + /// If this table_id is for a table function we don't check access rights here because in this case they have been already checked in ITableFunction::execute(). + context->checkAccess(AccessType::SELECT, table_id, required_columns); + + /// Remove limits for some tables in the `system` database. + if (shouldIgnoreQuotaAndLimits(table_id) && (joined_tables.tablesCount() <= 1)) { options.ignore_quota = true; options.ignore_limits = true; @@ -538,7 +553,10 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (options.to_stage == QueryProcessingStage::Enum::WithMergeableState) { if (!analysis_result.need_aggregate) - return analysis_result.before_order_and_select->getResultColumns(); + { + // What's the difference with selected_columns? 
+ return analysis_result.before_order_by->getResultColumns(); + } Block header = analysis_result.before_aggregation->getResultColumns(); @@ -564,7 +582,8 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (options.to_stage == QueryProcessingStage::Enum::WithMergeableStateAfterAggregation) { - return analysis_result.before_order_and_select->getResultColumns(); + // What's the difference with selected_columns? + return analysis_result.before_order_by->getResultColumns(); } return analysis_result.final_projection->getResultColumns(); @@ -958,8 +977,9 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu } else { - executeExpression(query_plan, expressions.before_order_and_select, "Before ORDER BY and SELECT"); + executeExpression(query_plan, expressions.before_window, "Before window functions"); executeWindow(query_plan); + executeExpression(query_plan, expressions.before_order_by, "Before ORDER BY"); executeDistinct(query_plan, true, expressions.selected_columns, true); } @@ -1005,8 +1025,10 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu else if (expressions.hasHaving()) executeHaving(query_plan, expressions.before_having); - executeExpression(query_plan, expressions.before_order_and_select, "Before ORDER BY and SELECT"); + executeExpression(query_plan, expressions.before_window, + "Before window functions"); executeWindow(query_plan); + executeExpression(query_plan, expressions.before_order_by, "Before ORDER BY"); executeDistinct(query_plan, true, expressions.selected_columns, true); } @@ -1029,10 +1051,23 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, const BlockInpu /** Optimization - if there are several sources and there is LIMIT, then first apply the preliminary LIMIT, * limiting the number of rows in each up to `offset + limit`. 
*/ + bool has_withfill = false; + if (query.orderBy()) + { + SortDescription order_descr = getSortDescription(query, *context); + for (auto & desc : order_descr) + if (desc.with_fill) + { + has_withfill = true; + break; + } + } + bool has_prelimit = false; if (!to_aggregation_stage && query.limitLength() && !query.limit_with_ties && !hasWithTotalsInAnySubqueryInFromClause(query) && - !query.arrayJoinExpressionList() && !query.distinct && !expressions.hasLimitBy() && !settings.extremes) + !query.arrayJoinExpressionList() && !query.distinct && !expressions.hasLimitBy() && !settings.extremes && + !has_withfill) { executePreLimit(query_plan, false); has_prelimit = true; @@ -1745,6 +1780,11 @@ void InterpreterSelectQuery::executeRollupOrCube(QueryPlan & query_plan, Modific void InterpreterSelectQuery::executeExpression(QueryPlan & query_plan, const ActionsDAGPtr & expression, const std::string & description) { + if (!expression) + { + return; + } + auto expression_step = std::make_unique(query_plan.getCurrentDataStream(), expression); expression_step->setStepDescription(description); diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index c0511122c1e..17d7949e478 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -161,6 +161,7 @@ StoragePtr JoinedTables::getLeftTableStorage() if (isLeftTableFunction()) return context.getQueryContext().executeTableFunction(left_table_expression); + StorageID table_id = StorageID::createEmpty(); if (left_db_and_table) { table_id = context.resolveStorageID(StorageID(left_db_and_table->database, left_db_and_table->table, left_db_and_table->uuid)); diff --git a/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h index 1e787ee4a65..812808fed61 100644 --- a/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -43,8 +43,6 @@ public: bool isLeftTableFunction() const; size_t tablesCount() const { return table_expressions.size(); } - const 
StorageID & leftTableID() const { return table_id; } - void rewriteDistributedInAndJoins(ASTPtr & query); std::unique_ptr makeLeftTableSubquery(const SelectQueryOptions & select_options); @@ -57,9 +55,6 @@ private: /// Legacy (duplicated left table values) ASTPtr left_table_expression; std::optional left_db_and_table; - - /// left_db_and_table or 'system.one' - StorageID table_id = StorageID::createEmpty(); }; } diff --git a/src/Interpreters/PredicateExpressionsOptimizer.cpp b/src/Interpreters/PredicateExpressionsOptimizer.cpp index 614693b5ae0..00b47be408a 100644 --- a/src/Interpreters/PredicateExpressionsOptimizer.cpp +++ b/src/Interpreters/PredicateExpressionsOptimizer.cpp @@ -90,8 +90,12 @@ std::vector PredicateExpressionsOptimizer::extractTablesPredicates(const A ExpressionInfoVisitor::Data expression_info{.context = context, .tables = tables_with_columns}; ExpressionInfoVisitor(expression_info).visit(predicate_expression); - if (expression_info.is_stateful_function || !expression_info.is_deterministic_function) - return {}; /// Not optimized when predicate contains stateful function or indeterministic function + if (expression_info.is_stateful_function + || !expression_info.is_deterministic_function + || expression_info.is_window_function) + { + return {}; /// Not optimized when predicate contains stateful function or indeterministic function or window functions + } if (!expression_info.is_array_join) { @@ -190,6 +194,12 @@ bool PredicateExpressionsOptimizer::tryMovePredicatesFromHavingToWhere(ASTSelect if (expression_info.is_stateful_function) return false; + if (expression_info.is_window_function) + { + // Window functions are not allowed in either HAVING or WHERE. 
+ return false; + } + if (expression_info.is_aggregate_function) having_predicates.emplace_back(moving_predicate); else diff --git a/src/Interpreters/PredicateRewriteVisitor.cpp b/src/Interpreters/PredicateRewriteVisitor.cpp index 9795675bcc8..5773629d0d1 100644 --- a/src/Interpreters/PredicateRewriteVisitor.cpp +++ b/src/Interpreters/PredicateRewriteVisitor.cpp @@ -88,7 +88,7 @@ bool PredicateRewriteVisitorData::rewriteSubquery(ASTSelectQuery & subquery, con || (!optimize_with && subquery.with()) || subquery.withFill() || subquery.limitBy() || subquery.limitLength() - || hasStatefulFunction(subquery.select(), context)) + || hasNonRewritableFunction(subquery.select(), context)) return false; for (const auto & predicate : predicates) diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index 25496c2b613..38b54de2130 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -148,9 +148,9 @@ void QueryNormalizer::visit(ASTSelectQuery & select, const ASTPtr &, Data & data /// Don't go into select query. It processes children itself. /// Do not go to the left argument of lambda expressions, so as not to replace the formal parameters /// on aliases in expressions of the form 123 AS x, arrayMap(x -> 1, [2]). 
-void QueryNormalizer::visitChildren(const ASTPtr & node, Data & data) +void QueryNormalizer::visitChildren(IAST * node, Data & data) { - if (const auto * func_node = node->as()) + if (auto * func_node = node->as()) { if (func_node->tryGetQueryArgument()) { @@ -176,6 +176,16 @@ void QueryNormalizer::visitChildren(const ASTPtr & node, Data & data) visit(child, data); } } + + if (func_node->window_partition_by) + { + visitChildren(func_node->window_partition_by.get(), data); + } + + if (func_node->window_order_by) + { + visitChildren(func_node->window_order_by.get(), data); + } } else if (!node->as()) { @@ -221,7 +231,7 @@ void QueryNormalizer::visit(ASTPtr & ast, Data & data) if (ast.get() != initial_ast.get()) visit(ast, data); else - visitChildren(ast, data); + visitChildren(ast.get(), data); current_asts.erase(initial_ast.get()); current_asts.erase(ast.get()); diff --git a/src/Interpreters/QueryNormalizer.h b/src/Interpreters/QueryNormalizer.h index 5bd0064c002..e481f76ca8e 100644 --- a/src/Interpreters/QueryNormalizer.h +++ b/src/Interpreters/QueryNormalizer.h @@ -69,7 +69,7 @@ private: static void visit(ASTTablesInSelectQueryElement &, const ASTPtr &, Data &); static void visit(ASTSelectQuery &, const ASTPtr &, Data &); - static void visitChildren(const ASTPtr &, Data & data); + static void visitChildren(IAST * node, Data & data); }; } diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index fc3ad79f217..2b801500958 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -29,6 +29,7 @@ #include #include +#include #include #include @@ -445,6 +446,8 @@ std::vector getAggregates(ASTPtr & query, const ASTSelectQu for (auto & arg : node->arguments->children) { assertNoAggregates(arg, "inside another aggregate function"); + // We also can't have window functions inside aggregate functions, + // because the window functions are calculated later. 
assertNoWindows(arg, "inside an aggregate function"); } } @@ -454,7 +457,9 @@ std::vector getAggregates(ASTPtr & query, const ASTSelectQu std::vector getWindowFunctions(ASTPtr & query, const ASTSelectQuery & select_query) { - /// There can not be window functions inside the WHERE and PREWHERE. + /// There can not be window functions inside the WHERE, PREWHERE and HAVING + if (select_query.having()) + assertNoWindows(select_query.having(), "in HAVING"); if (select_query.where()) assertNoWindows(select_query.where(), "in WHERE"); if (select_query.prewhere()) @@ -463,17 +468,34 @@ std::vector getWindowFunctions(ASTPtr & query, const ASTSel GetAggregatesVisitor::Data data; GetAggregatesVisitor(data).visit(query); - /// There can not be other window functions within the aggregate functions. + /// Window functions cannot be inside aggregates or other window functions. + /// Aggregate functions can be inside window functions because they are + /// calculated earlier. for (const ASTFunction * node : data.window_functions) { if (node->arguments) { for (auto & arg : node->arguments->children) { - assertNoAggregates(arg, "inside a window function"); assertNoWindows(arg, "inside another window function"); } } + + if (node->window_partition_by) + { + for (auto & arg : node->window_partition_by->children) + { + assertNoWindows(arg, "inside PARTITION BY of a window"); + } + } + + if (node->window_order_by) + { + for (auto & arg : node->window_order_by->children) + { + assertNoWindows(arg, "inside ORDER BY of a window"); + } + } } return data.window_functions; diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index d5d03a540c9..d9159117b2d 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -39,6 +39,16 @@ void ASTFunction::appendColumnNameImpl(WriteBuffer & ostr) const (*it)->appendColumnName(ostr); } writeChar(')', ostr); + + if (is_window_function) + { + writeCString(" OVER (", ostr); + FormatSettings settings{ostr, true /* 
one_line */}; + FormatState state; + FormatStateStacked frame; + appendWindowDescription(settings, state, frame); + writeCString(")", ostr); + } } /** Get the text that identifies this element. */ @@ -57,17 +67,20 @@ ASTPtr ASTFunction::clone() const if (window_name) { - res->set(res->window_name, window_name->clone()); + res->window_name = window_name->clone(); + res->children.push_back(res->window_name); } if (window_partition_by) { - res->set(res->window_partition_by, window_partition_by->clone()); + res->window_partition_by = window_partition_by->clone(); + res->children.push_back(res->window_partition_by); } if (window_order_by) { - res->set(res->window_order_by, window_order_by->clone()); + res->window_order_by = window_order_by->clone(); + res->children.push_back(res->window_order_by); } return res; diff --git a/src/Parsers/ASTFunction.h b/src/Parsers/ASTFunction.h index 38e5f3f095c..4c20309fcb9 100644 --- a/src/Parsers/ASTFunction.h +++ b/src/Parsers/ASTFunction.h @@ -21,9 +21,25 @@ public: ASTPtr parameters; bool is_window_function = false; - ASTIdentifier * window_name; - ASTExpressionList * window_partition_by; - ASTExpressionList * window_order_by; + + // We have to make these fields ASTPtr because this is what the visitors + // expect. Some of them take const ASTPtr & (makes no sense), and some + // take ASTPtr & and modify it. I don't understand how the latter is + // compatible with also having an owning `children` array -- apparently it + // leads to some dangling children that are not referenced by the fields of + // the AST class itself. Some older code hints at the idea of having + // ownership in `children` only, and making the class fields to be raw + // pointers of proper type (see e.g. IAST::set), but this is not compatible + // with the visitor interface. 
+ + // ASTIdentifier + ASTPtr window_name; + + // ASTExpressionList + ASTPtr window_partition_by; + + // ASTExpressionList of + ASTPtr window_order_by; /// do not print empty parentheses if there are no args - compatibility with new AST for data types and engine names. bool no_empty_args = false; diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 726e28005e3..7c82c4aca1e 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -419,7 +419,8 @@ bool ParserWindowDefinition::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ParserIdentifier window_name_parser; if (window_name_parser.parse(pos, window_name_ast, expected)) { - function->set(function->window_name, window_name_ast); + function->children.push_back(window_name_ast); + function->window_name = window_name_ast; return true; } else @@ -442,7 +443,8 @@ bool ParserWindowDefinition::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ASTPtr partition_by_ast; if (columns_partition_by.parse(pos, partition_by_ast, expected)) { - function->set(function->window_partition_by, partition_by_ast); + function->children.push_back(partition_by_ast); + function->window_partition_by = partition_by_ast; } else { @@ -455,7 +457,8 @@ bool ParserWindowDefinition::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ASTPtr order_by_ast; if (columns_order_by.parse(pos, order_by_ast, expected)) { - function->set(function->window_order_by, order_by_ast); + function->children.push_back(order_by_ast); + function->window_order_by = order_by_ast; } else { diff --git a/src/Processors/LimitTransform.cpp b/src/Processors/LimitTransform.cpp index f7043cbfec5..36c58e1454e 100644 --- a/src/Processors/LimitTransform.cpp +++ b/src/Processors/LimitTransform.cpp @@ -237,7 +237,7 @@ LimitTransform::Status LimitTransform::preparePair(PortsData & data) previous_row_chunk = makeChunkWithPreviousRow(data.current_chunk, data.current_chunk.getNumRows() - 1); 
} else - /// This function may be heavy to execute in prepare. But it happens no more then twice, and make code simpler. + /// This function may be heavy to execute in prepare. But it happens no more than twice, and make code simpler. splitChunk(data); bool may_need_more_data_for_ties = previous_row_chunk || rows_read - rows <= offset + limit; diff --git a/src/Processors/Merges/Algorithms/MergedData.h b/src/Processors/Merges/Algorithms/MergedData.h index 5075174db62..9bf33d72f31 100644 --- a/src/Processors/Merges/Algorithms/MergedData.h +++ b/src/Processors/Merges/Algorithms/MergedData.h @@ -82,7 +82,7 @@ public: if (need_flush) return true; - /// Never return more then max_block_size. + /// Never return more than max_block_size. if (merged_rows >= max_block_size) return true; diff --git a/src/Processors/Pipe.h b/src/Processors/Pipe.h index 065a89d7c0f..2d64de3e664 100644 --- a/src/Processors/Pipe.h +++ b/src/Processors/Pipe.h @@ -71,8 +71,8 @@ public: enum class StreamType { Main = 0, /// Stream for query data. There may be several streams of this type. - Totals, /// Stream for totals. No more then one. - Extremes, /// Stream for extremes. No more then one. + Totals, /// Stream for totals. No more than one. + Extremes, /// Stream for extremes. No more than one. 
}; using ProcessorGetter = std::function; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.cpp b/src/Processors/QueryPlan/IQueryPlanStep.cpp index 71c4caaa795..f06897e8488 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.cpp +++ b/src/Processors/QueryPlan/IQueryPlanStep.cpp @@ -46,6 +46,8 @@ static void doDescribeHeader(const Block & header, size_t count, IQueryPlanStep: first = false; elem.dumpNameAndType(settings.out); + settings.out << ": "; + elem.dumpStructure(settings.out); settings.out << '\n'; } } diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index e88090b4819..1b3ea16a213 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -247,6 +247,15 @@ static void explainStep( step.describeActions(settings); } +std::string debugExplainStep(const IQueryPlanStep & step) +{ + WriteBufferFromOwnString out; + IQueryPlanStep::FormatSettings settings{.out = out}; + QueryPlan::ExplainPlanOptions options{.actions = true}; + explainStep(step, settings, options); + return out.str(); +} + void QueryPlan::explainPlan(WriteBuffer & buffer, const ExplainPlanOptions & options) { checkInitialized(); @@ -488,6 +497,7 @@ static bool tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Node * { auto & parent = parent_node->step; auto & child = child_node->step; + /// TODO: FilterStep auto * parent_expr = typeid_cast(parent.get()); auto * child_expr = typeid_cast(child.get()); diff --git a/src/Processors/QueryPlan/QueryPlan.h b/src/Processors/QueryPlan/QueryPlan.h index cbe487312cc..9d2d7d93a36 100644 --- a/src/Processors/QueryPlan/QueryPlan.h +++ b/src/Processors/QueryPlan/QueryPlan.h @@ -97,4 +97,6 @@ private: std::vector> interpreter_context; }; +std::string debugExplainStep(const IQueryPlanStep & step); + } diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index 6e8b0ea8e39..b200e306213 100644 --- 
a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -77,6 +77,11 @@ void WindowTransform::transform(Chunk & chunk) ws.argument_columns.clear(); for (const auto column_index : ws.argument_column_indices) { + // Aggregate functions can't work with constant columns, so we have to + // materialize them like the Aggregator does. + columns[column_index] + = std::move(columns[column_index])->convertToFullColumnIfConst(); + ws.argument_columns.push_back(columns[column_index].get()); } diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 185211af84b..2dec16e7d10 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -46,7 +46,7 @@ struct MergeTreeWriterSettings bool rewrite_primary_key; bool blocks_are_granules_size; - /// Used for AIO threshold comparsion + /// Used for AIO threshold comparison /// FIXME currently doesn't work because WriteBufferAIO contain obscure bug(s) size_t estimated_size = 0; }; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp index c2619317776..db4fe34e702 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeAltersSequence.cpp @@ -26,7 +26,7 @@ void ReplicatedMergeTreeAltersSequence::addMetadataAlter( int alter_version, std::lock_guard & /*state_lock*/) { /// Data alter (mutation) always added before. See ReplicatedMergeTreeQueue::pullLogsToQueue. - /// So mutation alredy added to this sequence or doesn't exist. + /// So mutation already added to this sequence or doesn't exist. 
if (!queue_state.count(alter_version)) queue_state.emplace(alter_version, AlterState{.metadata_finished=false, .data_finished=true}); else diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a87b47a0eb7..f1b6c3c7e00 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -6155,7 +6155,7 @@ bool StorageReplicatedMergeTree::dropPart( /// DROP_RANGE with detach will move this part together with source parts to `detached/` dir. entry.type = LogEntry::DROP_RANGE; entry.source_replica = replica_name; - entry.new_part_name = drop_part_info.getPartName(); + entry.new_part_name = getPartNamePossiblyFake(format_version, drop_part_info); entry.detach = detach; entry.create_time = time(nullptr); diff --git a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py index 47e325d0e96..c04194c8ebb 100644 --- a/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py +++ b/tests/integration/test_materialize_mysql_database/materialize_with_ddl.py @@ -48,16 +48,15 @@ def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_nam "/* Need ClickHouse support read mysql decimal unsigned_decimal DECIMAL(19, 10) UNSIGNED, _decimal DECIMAL(19, 10), */" "unsigned_float FLOAT UNSIGNED, _float FLOAT, " "unsigned_double DOUBLE UNSIGNED, _double DOUBLE, " - "_varchar VARCHAR(10), _char CHAR(10), " + "_varchar VARCHAR(10), _char CHAR(10), binary_col BINARY(8), " "/* Need ClickHouse support Enum('a', 'b', 'v') _enum ENUM('a', 'b', 'c'), */" "_date Date, _datetime DateTime, _timestamp TIMESTAMP, _bool BOOLEAN) ENGINE = InnoDB;") # it already has some data mysql_node.query(""" - INSERT INTO test_database.test_table_1 VALUES(1, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', + INSERT INTO test_database.test_table_1 VALUES(1, 1, 
-1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary', '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', true); """) - clickhouse_node.query( "CREATE DATABASE test_database ENGINE = MaterializeMySQL('{}:3306', 'test_database', 'root', 'clickhouse')".format( service_name)) @@ -65,51 +64,51 @@ def dml_with_materialize_mysql_database(clickhouse_node, mysql_node, service_nam assert "test_database" in clickhouse_node.query("SHOW DATABASES") check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV", - "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t" + "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n") mysql_node.query(""" - INSERT INTO test_database.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', + INSERT INTO test_database.test_table_1 VALUES(2, 1, -1, 2, -2, 3, -3, 4, -4, 5, -5, 6, -6, 3.2, -3.2, 3.4, -3.4, 'varchar', 'char', 'binary', '2020-01-01', '2020-01-01 00:00:00', '2020-01-01 00:00:00', false); """) check_query(clickhouse_node, "SELECT * FROM test_database.test_table_1 ORDER BY key FORMAT TSV", - "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t" + "1\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" "2020-01-01 00:00:00\t2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n") + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t2020-01-01 00:00:00\t0\n") mysql_node.query("UPDATE test_database.test_table_1 SET unsigned_tiny_int = 2 WHERE `key` = 1") check_query(clickhouse_node, """ SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int, 
small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, - unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, + unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV """, - "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t" + "1\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" "2020-01-01 00:00:00\t1\n2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t0\n") + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n") # update primary key mysql_node.query("UPDATE test_database.test_table_1 SET `key` = 3 WHERE `unsigned_tiny_int` = 2") check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " - " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, " + " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV", "2\t1\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\t" - "varchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t" - "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t2020-01-01 00:00:00\t1\n") + "varchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 00:00:00\t0\n3\t2\t-1\t2\t-2\t3\t-3\t" + "4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t2020-01-01 
00:00:00\t1\n") mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `key` = 2') check_query(clickhouse_node, "SELECT key, unsigned_tiny_int, tiny_int, unsigned_small_int," " small_int, unsigned_medium_int, medium_int, unsigned_int, _int, unsigned_integer, _integer, " - " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, " + " unsigned_bigint, _bigint, unsigned_float, _float, unsigned_double, _double, _varchar, _char, binary_col, " " _date, _datetime, /* exclude it, because ON UPDATE CURRENT_TIMESTAMP _timestamp, */ " " _bool FROM test_database.test_table_1 ORDER BY key FORMAT TSV", - "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\t2020-01-01\t" + "3\t2\t-1\t2\t-2\t3\t-3\t4\t-4\t5\t-5\t6\t-6\t3.2\t-3.2\t3.4\t-3.4\tvarchar\tchar\tbinary\\0\\0\t2020-01-01\t" "2020-01-01 00:00:00\t1\n") mysql_node.query('DELETE FROM test_database.test_table_1 WHERE `unsigned_tiny_int` = 2') diff --git a/tests/integration/test_merge_tree_empty_parts/__init__.py b/tests/integration/test_merge_tree_empty_parts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_merge_tree_empty_parts/configs/cleanup_thread.xml b/tests/integration/test_merge_tree_empty_parts/configs/cleanup_thread.xml new file mode 100644 index 00000000000..943662aad67 --- /dev/null +++ b/tests/integration/test_merge_tree_empty_parts/configs/cleanup_thread.xml @@ -0,0 +1,6 @@ + + + 0 + 0 + + \ No newline at end of file diff --git a/tests/integration/test_merge_tree_empty_parts/configs/remote_servers.xml b/tests/integration/test_merge_tree_empty_parts/configs/remote_servers.xml new file mode 100644 index 00000000000..e7369160a81 --- /dev/null +++ b/tests/integration/test_merge_tree_empty_parts/configs/remote_servers.xml @@ -0,0 +1,13 @@ + + + + + true + + node1 + 9000 + + + + + diff --git a/tests/integration/test_merge_tree_empty_parts/test.py 
b/tests/integration/test_merge_tree_empty_parts/test.py new file mode 100644 index 00000000000..bc2679d4c92 --- /dev/null +++ b/tests/integration/test_merge_tree_empty_parts/test.py @@ -0,0 +1,38 @@ +import pytest +import helpers.client +import helpers.cluster +from helpers.test_tools import assert_eq_with_retry + + +cluster = helpers.cluster.ClickHouseCluster(__file__) +node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml', 'configs/cleanup_thread.xml'], with_zookeeper=True) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + +def test_empty_parts_alter_delete(started_cluster): + node1.query("CREATE TABLE empty_parts_delete (d Date, key UInt64, value String) \ + ENGINE = ReplicatedMergeTree('/clickhouse/tables/empty_parts_delete', 'r1', d, key, 8192)") + + node1.query("INSERT INTO empty_parts_delete VALUES (toDate('2020-10-10'), 1, 'a')") + node1.query("ALTER TABLE empty_parts_delete DELETE WHERE 1 SETTINGS mutations_sync = 2") + + print(node1.query("SELECT count() FROM empty_parts_delete")) + assert_eq_with_retry(node1, "SELECT count() FROM system.parts WHERE table = 'empty_parts_delete' AND active", "0") + +def test_empty_parts_summing(started_cluster): + node1.query("CREATE TABLE empty_parts_summing (d Date, key UInt64, value Int64) \ + ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/empty_parts_summing', 'r1', d, key, 8192)") + + node1.query("INSERT INTO empty_parts_summing VALUES (toDate('2020-10-10'), 1, 1)") + node1.query("INSERT INTO empty_parts_summing VALUES (toDate('2020-10-10'), 1, -1)") + node1.query("OPTIMIZE TABLE empty_parts_summing FINAL") + + assert_eq_with_retry(node1, "SELECT count() FROM system.parts WHERE table = 'empty_parts_summing' AND active", "0") diff --git a/tests/integration/test_select_access_rights/__init__.py b/tests/integration/test_select_access_rights/__init__.py new file mode 100644 index 
00000000000..e69de29bb2d diff --git a/tests/integration/test_select_access_rights/test.py b/tests/integration/test_select_access_rights/test.py new file mode 100644 index 00000000000..ccea77d6fb7 --- /dev/null +++ b/tests/integration/test_select_access_rights/test.py @@ -0,0 +1,157 @@ +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.test_tools import TSV + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance('instance') + + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def cleanup_after_test(): + instance.query("CREATE USER OR REPLACE A") + yield + instance.query("DROP TABLE IF EXISTS table1") + instance.query("DROP TABLE IF EXISTS table2") + + +def test_select_single_column(): + instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + + select_query = "SELECT a FROM table1" + assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(a) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT(a) ON default.table1 FROM A") + assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + +def test_select_single_column_with_table_grant(): + instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + + select_query = "SELECT a FROM table1" + assert "it's necessary to have grant SELECT(a) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT(a) ON default.table1 FROM A") + assert "it's necessary to have grant SELECT(a) ON 
default.table1" in instance.query_and_get_error(select_query, user = 'A') + + +def test_select_all_columns(): + instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + + select_query = "SELECT * FROM table1" + assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(d) ON default.table1 TO A") + assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(a) ON default.table1 TO A") + assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(b) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + +def test_select_all_columns_with_table_grant(): + instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + + select_query = "SELECT * FROM table1" + assert "it's necessary to have grant SELECT(d, a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + +def test_alias(): + instance.query("CREATE TABLE table1(x Int32, y Int32) ENGINE = MergeTree ORDER BY tuple()") + + select_query = "SELECT x, y, x + y AS s FROM table1" + assert "it's necessary to have grant SELECT(x, y) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(x, y) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + +def test_alias_columns(): + instance.query("CREATE TABLE table1(x Int32, y Int32, s Int32 ALIAS x + y) ENGINE = MergeTree ORDER BY tuple()") + + select_query = "SELECT * FROM table1" + assert "it's necessary to have grant SELECT(x, y) ON 
default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(x,y) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + select_query = "SELECT s FROM table1" + assert "it's necessary to have grant SELECT(s) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(s) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT(x,y) ON default.table1 FROM A") + assert instance.query(select_query, user = 'A') == "" + + +def test_materialized_columns(): + instance.query("CREATE TABLE table1(x Int32, y Int32, p Int32 MATERIALIZED x * y) ENGINE = MergeTree ORDER BY tuple()") + + select_query = "SELECT * FROM table1" + assert "it's necessary to have grant SELECT(x, y) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(x,y) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + select_query = "SELECT p FROM table1" + assert "it's necessary to have grant SELECT(p) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(p) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT(x,y) ON default.table1 FROM A") + assert instance.query(select_query, user = 'A') == "" + + +def test_select_join(): + instance.query("CREATE TABLE table1(d DATE, a String, b UInt8) ENGINE = MergeTree ORDER BY d") + instance.query("CREATE TABLE table2(d DATE, x UInt32, y UInt8) ENGINE = MergeTree ORDER BY d") + + select_query = "SELECT * FROM table1 JOIN table2 USING(d)" + assert "it's necessary to have grant SELECT(d, x, y) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(d, x, y) ON default.table2 TO A") + assert "it's necessary to have grant SELECT(d, a, b) ON 
default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(d, a, b) ON default.table1 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT ON default.table2 FROM A") + assert "it's necessary to have grant SELECT(d, x, y) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + + +def test_select_union(): + instance.query("CREATE TABLE table1(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()") + instance.query("CREATE TABLE table2(a String, b UInt8) ENGINE = MergeTree ORDER BY tuple()") + + select_query = "SELECT * FROM table1 UNION ALL SELECT * FROM table2" + assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(a, b) ON default.table1 TO A") + assert "it's necessary to have grant SELECT(a, b) ON default.table2" in instance.query_and_get_error(select_query, user = 'A') + + instance.query("GRANT SELECT(a, b) ON default.table2 TO A") + assert instance.query(select_query, user = 'A') == "" + + instance.query("REVOKE SELECT ON default.table1 FROM A") + assert "it's necessary to have grant SELECT(a, b) ON default.table1" in instance.query_and_get_error(select_query, user = 'A') diff --git a/tests/integration/test_storage_mysql/test.py b/tests/integration/test_storage_mysql/test.py index 87033381e2c..7b23e20e200 100644 --- a/tests/integration/test_storage_mysql/test.py +++ b/tests/integration/test_storage_mysql/test.py @@ -148,6 +148,13 @@ def test_table_function(started_cluster): assert node1.query("SELECT sum(`money`) FROM {}".format(table_function)).rstrip() == '60000' conn.close() +def test_binary_type(started_cluster): + conn = get_mysql_conn() + with conn.cursor() as cursor: + cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)") + table_function = "mysql('mysql1:3306', 'clickhouse', '{}', 'root', 
'clickhouse')".format('binary_type') + node1.query("INSERT INTO {} VALUES (42, 'clickhouse')".format('TABLE FUNCTION ' + table_function)) + assert node1.query("SELECT * FROM {}".format(table_function)) == '42\tclickhouse\\0\\0\\0\\0\\0\\0\n' def test_enum_type(started_cluster): table_name = 'test_enum_type' diff --git a/tests/performance/window_functions.xml b/tests/performance/window_functions.xml new file mode 100644 index 00000000000..f42345d0696 --- /dev/null +++ b/tests/performance/window_functions.xml @@ -0,0 +1,38 @@ + + + hits_100m_single + + + + 1 + + + + + + + + diff --git a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh index 3f09e256b02..86f1d1f161c 100755 --- a/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh +++ b/tests/queries/0_stateless/00029_test_zookeeper_optimize_exception.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00039_inserts_through_http.sh b/tests/queries/0_stateless/00039_inserts_through_http.sh index 35abcd166d7..2eaa4393935 100755 --- a/tests/queries/0_stateless/00039_inserts_through_http.sh +++ b/tests/queries/0_stateless/00039_inserts_through_http.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo 'DROP TABLE IF EXISTS long_insert' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- diff --git a/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh b/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh index 0cf5f95d3d9..52076767981 100755 --- a/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh +++ b/tests/queries/0_stateless/00070_insert_fewer_columns_http.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'DROP TABLE IF EXISTS insert_fewer_columns' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- diff --git a/tests/queries/0_stateless/00090_union_race_conditions_1.sh b/tests/queries/0_stateless/00090_union_race_conditions_1.sh index a2da4c461dd..afec8b5bac9 100755 --- a/tests/queries/0_stateless/00090_union_race_conditions_1.sh +++ b/tests/queries/0_stateless/00090_union_race_conditions_1.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00091_union_race_conditions_2.sh b/tests/queries/0_stateless/00091_union_race_conditions_2.sh index 94df04e824a..78a6cca2b2b 100755 --- a/tests/queries/0_stateless/00091_union_race_conditions_2.sh +++ b/tests/queries/0_stateless/00091_union_race_conditions_2.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00092_union_race_conditions_3.sh b/tests/queries/0_stateless/00092_union_race_conditions_3.sh index 2d74f853ecd..9be2613f70a 100755 --- a/tests/queries/0_stateless/00092_union_race_conditions_3.sh +++ b/tests/queries/0_stateless/00092_union_race_conditions_3.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00093_union_race_conditions_4.sh b/tests/queries/0_stateless/00093_union_race_conditions_4.sh index d33b2aacc06..ab1a025c5b2 100755 --- a/tests/queries/0_stateless/00093_union_race_conditions_4.sh +++ b/tests/queries/0_stateless/00093_union_race_conditions_4.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00094_union_race_conditions_5.sh b/tests/queries/0_stateless/00094_union_race_conditions_5.sh index 774c4f1e54e..b546b6a0092 100755 --- a/tests/queries/0_stateless/00094_union_race_conditions_5.sh +++ b/tests/queries/0_stateless/00094_union_race_conditions_5.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh index 26502247055..b3c0b37e0c0 100755 --- a/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh +++ b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh [ "$NO_SHELL_CONFIG" ] || . 
"$CURDIR"/../shell_config.sh seq 1 1000 | sed -r 's/.+/CREATE TABLE IF NOT EXISTS buf_00097 (a UInt8) ENGINE = Buffer('$CLICKHOUSE_DATABASE', b, 1, 1, 1, 1, 1, 1, 1); DROP TABLE buf_00097;/' | $CLICKHOUSE_CLIENT -n diff --git a/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh index b0cc62cc652..3856da7f214 100755 --- a/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh +++ b/tests/queries/0_stateless/00097_long_storage_buffer_race_condition_mt.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh export NO_SHELL_CONFIG=1 diff --git a/tests/queries/0_stateless/00100_subquery_table_identifier.sh b/tests/queries/0_stateless/00100_subquery_table_identifier.sh index 2a42e9a0e70..e20939bc992 100755 --- a/tests/queries/0_stateless/00100_subquery_table_identifier.sh +++ b/tests/queries/0_stateless/00100_subquery_table_identifier.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="SELECT sum(dummy) FROM remote('localhost', system, one) WHERE 1 GLOBAL IN (SELECT 1)" diff --git a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh index 855c5f8ac86..1348989e244 100755 --- a/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh +++ b/tests/queries/0_stateless/00115_shard_in_incomplete_result.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh index 7dbd662c54b..05ebe9d19a8 100755 --- a/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh +++ b/tests/queries/0_stateless/00133_long_shard_memory_tracker_and_exception_safety.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n --query=" diff --git a/tests/queries/0_stateless/00155_long_merges.sh b/tests/queries/0_stateless/00155_long_merges.sh index 863575daf11..c2aafaf0c95 100755 --- a/tests/queries/0_stateless/00155_long_merges.sh +++ b/tests/queries/0_stateless/00155_long_merges.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function create { diff --git a/tests/queries/0_stateless/00177_inserts_through_http_parts.sh b/tests/queries/0_stateless/00177_inserts_through_http_parts.sh index bae77c56e13..72a9d4fa16f 100755 --- a/tests/queries/0_stateless/00177_inserts_through_http_parts.sh +++ b/tests/queries/0_stateless/00177_inserts_through_http_parts.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=DROP+TABLE" -d 'IF EXISTS insert' diff --git a/tests/queries/0_stateless/00186_very_long_arrays.sh b/tests/queries/0_stateless/00186_very_long_arrays.sh index 24ff6e098b6..26a4496a85b 100755 --- a/tests/queries/0_stateless/00186_very_long_arrays.sh +++ b/tests/queries/0_stateless/00186_very_long_arrays.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh (echo 'SELECT number FROM system.numbers WHERE transform(number, ['; seq 1 100000 | tr '\n' ','; echo '0],['; seq 1 100000 | tr '\n' ','; echo '0]) = 10000000 LIMIT 1';) | $CLICKHOUSE_CLIENT --max_query_size=100000000 diff --git a/tests/queries/0_stateless/00210_insert_select_extremes_http.sh b/tests/queries/0_stateless/00210_insert_select_extremes_http.sh index 5a944c58466..e9c6257d848 100755 --- a/tests/queries/0_stateless/00210_insert_select_extremes_http.sh +++ b/tests/queries/0_stateless/00210_insert_select_extremes_http.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1" -d @- <<< "DROP TABLE IF EXISTS test_00210" diff --git a/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh b/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh index 9e92a997f51..74cbbe7f71d 100755 --- a/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh +++ b/tests/queries/0_stateless/00265_http_content_type_format_timezone.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh CLICKHOUSE_TIMEZONE_ESCAPED=$($CLICKHOUSE_CLIENT --query="SELECT timezone()" | sed 's/[]\/$*.^+:()[]/\\&/g') diff --git a/tests/queries/0_stateless/00301_csv.sh b/tests/queries/0_stateless/00301_csv.sh index 6d9c5fc4fed..0aee9abe25c 100755 --- a/tests/queries/0_stateless/00301_csv.sh +++ b/tests/queries/0_stateless/00301_csv.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv"; diff --git a/tests/queries/0_stateless/00302_http_compression.sh b/tests/queries/0_stateless/00302_http_compression.sh index 1727d5ab993..829475e8602 100755 --- a/tests/queries/0_stateless/00302_http_compression.sh +++ b/tests/queries/0_stateless/00302_http_compression.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10'; diff --git a/tests/queries/0_stateless/00304_http_external_data.sh b/tests/queries/0_stateless/00304_http_external_data.sh index e088540120c..41a9dea1ebb 100755 --- a/tests/queries/0_stateless/00304_http_external_data.sh +++ b/tests/queries/0_stateless/00304_http_external_data.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo -ne '1,Hello\n2,World\n' | ${CLICKHOUSE_CURL} -sSF 'file=@-' "${CLICKHOUSE_URL}&query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String"; diff --git a/tests/queries/0_stateless/00305_http_and_readonly.sh b/tests/queries/0_stateless/00305_http_and_readonly.sh index 969f3b42e92..dd9f116be7a 100755 --- a/tests/queries/0_stateless/00305_http_and_readonly.sh +++ b/tests/queries/0_stateless/00305_http_and_readonly.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # POST permits everything. diff --git a/tests/queries/0_stateless/00310_tskv.sh b/tests/queries/0_stateless/00310_tskv.sh index 419bb7d228d..73b6581ac0d 100755 --- a/tests/queries/0_stateless/00310_tskv.sh +++ b/tests/queries/0_stateless/00310_tskv.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS tskv"; diff --git a/tests/queries/0_stateless/00313_const_totals_extremes.sh b/tests/queries/0_stateless/00313_const_totals_extremes.sh index eff01bfd511..0c51d80abe8 100755 --- a/tests/queries/0_stateless/00313_const_totals_extremes.sh +++ b/tests/queries/0_stateless/00313_const_totals_extremes.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&extremes=1&output_format_write_statistics=0" -d "SELECT 1 AS k, count() GROUP BY k WITH TOTALS"; diff --git a/tests/queries/0_stateless/00322_disable_checksumming.sh b/tests/queries/0_stateless/00322_disable_checksumming.sh index f06b8609d01..e04ec076f80 100755 --- a/tests/queries/0_stateless/00322_disable_checksumming.sh +++ b/tests/queries/0_stateless/00322_disable_checksumming.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo -ne '\x50\x74\x32\xf2\x59\xe9\x8a\xdb\x37\xc6\x4a\xa7\xfb\x22\xc4\x39''\x82\x13\x00\x00\x00\x09\x00\x00\x00''\x90SELECT 1\n' | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&decompress=1" --data-binary @- diff --git a/tests/queries/0_stateless/00335_bom.sh b/tests/queries/0_stateless/00335_bom.sh index 75622f0ba42..b8bcbb7d635 100755 --- a/tests/queries/0_stateless/00335_bom.sh +++ b/tests/queries/0_stateless/00335_bom.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'DROP TABLE IF EXISTS bom' | ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" --data-binary @- diff --git a/tests/queries/0_stateless/00336_shard_stack_trace.sh b/tests/queries/0_stateless/00336_shard_stack_trace.sh index a096d7a71c6..19389ec11c1 100755 --- a/tests/queries/0_stateless/00336_shard_stack_trace.sh +++ b/tests/queries/0_stateless/00336_shard_stack_trace.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'SELECT a' | wc -l diff --git a/tests/queries/0_stateless/00339_parsing_bad_arrays.sh b/tests/queries/0_stateless/00339_parsing_bad_arrays.sh index 2794cceb65d..51ffd8f9814 100755 --- a/tests/queries/0_stateless/00339_parsing_bad_arrays.sh +++ b/tests/queries/0_stateless/00339_parsing_bad_arrays.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -d 'DROP TABLE IF EXISTS bad_arrays' diff --git a/tests/queries/0_stateless/00354_host_command_line_option.sh b/tests/queries/0_stateless/00354_host_command_line_option.sh index 227908a7318..9d0d4d59bee 100755 --- a/tests/queries/0_stateless/00354_host_command_line_option.sh +++ b/tests/queries/0_stateless/00354_host_command_line_option.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh clickhouse_client_removed_host_parameter --host="${CLICKHOUSE_HOST}" --query="SELECT 1"; diff --git a/tests/queries/0_stateless/00365_statistics_in_formats.sh b/tests/queries/0_stateless/00365_statistics_in_formats.sh index d395f543ce7..724a5dc5fde 100755 --- a/tests/queries/0_stateless/00365_statistics_in_formats.sh +++ b/tests/queries/0_stateless/00365_statistics_in_formats.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS numbers"; diff --git a/tests/queries/0_stateless/00366_multi_statements.sh b/tests/queries/0_stateless/00366_multi_statements.sh index e8daffa79b1..9b885bb1b32 100755 --- a/tests/queries/0_stateless/00366_multi_statements.sh +++ b/tests/queries/0_stateless/00366_multi_statements.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="SELECT 1" diff --git a/tests/queries/0_stateless/00368_format_option_collision.sh b/tests/queries/0_stateless/00368_format_option_collision.sh index 425a90a1301..323de47428c 100755 --- a/tests/queries/0_stateless/00368_format_option_collision.sh +++ b/tests/queries/0_stateless/00368_format_option_collision.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh clickhouse_client_removed_host_parameter --host="${CLICKHOUSE_HOST}" --query="SELECT * FROM ext" --format=Vertical --external --file=- --structure="s String" --name=ext --format=JSONEachRow <<< '{"s":"Hello"}' diff --git a/tests/queries/0_stateless/00372_cors_header.sh b/tests/queries/0_stateless/00372_cors_header.sh index 6eec46fd942..8af6ee09876 100755 --- a/tests/queries/0_stateless/00372_cors_header.sh +++ b/tests/queries/0_stateless/00372_cors_header.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&add_http_cors_header=1" -H "Origin:smi2.ru" --data-binary @- <<< "SELECT 1" 2>&1 | grep -F "< Access-Control-Allow-Origin: *" | wc -l diff --git a/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh index c614b35bdfd..c9c53dedd69 100755 --- a/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh +++ b/tests/queries/0_stateless/00374_json_each_row_input_with_noisy_fields.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS json_noisy" diff --git a/tests/queries/0_stateless/00379_system_processes_port.sh b/tests/queries/0_stateless/00379_system_processes_port.sh index edbcea8c4f8..99413af98cf 100755 --- a/tests/queries/0_stateless/00379_system_processes_port.sh +++ b/tests/queries/0_stateless/00379_system_processes_port.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS --local-port 1390 "${CLICKHOUSE_URL}&query_id=my_id&query=SELECT+port+FROM+system.processes+WHERE+query_id%3D%27my_id%27+ORDER+BY+elapsed+LIMIT+1" diff --git a/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh index d58928ecfeb..d24d029c4e7 100755 --- a/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh +++ b/tests/queries/0_stateless/00380_client_break_at_exception_in_batch_mode.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --multiquery --query="SELECT 1; SELECT xyz; SELECT 2;" 2> /dev/null || true; diff --git a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh index 7f6d5d32a46..ef0ec1ae842 100755 --- a/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh +++ b/tests/queries/0_stateless/00385_storage_file_and_clickhouse-local_app.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh TABLE_HASH="cityHash64(groupArray(cityHash64(*)))" diff --git a/tests/queries/0_stateless/00386_long_in_pk.sh b/tests/queries/0_stateless/00386_long_in_pk.sh index 8cad8f93a13..66cc4ccc227 100755 --- a/tests/queries/0_stateless/00386_long_in_pk.sh +++ b/tests/queries/0_stateless/00386_long_in_pk.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00387_use_client_time_zone.sh b/tests/queries/0_stateless/00387_use_client_time_zone.sh index a86f866a6ad..201277b76d6 100755 --- a/tests/queries/0_stateless/00387_use_client_time_zone.sh +++ b/tests/queries/0_stateless/00387_use_client_time_zone.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh env TZ=UTC ${CLICKHOUSE_CLIENT} --use_client_time_zone=1 --query="SELECT toDateTime(1000000000)" diff --git a/tests/queries/0_stateless/00400_client_external_options.sh b/tests/queries/0_stateless/00400_client_external_options.sh index d519ca16a63..c2c6f44d62e 100755 --- a/tests/queries/0_stateless/00400_client_external_options.sh +++ b/tests/queries/0_stateless/00400_client_external_options.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo -ne "1\n2\n3\n" | $CLICKHOUSE_CLIENT --query="SELECT * FROM _data" --external --file=- --types=Int8; diff --git a/tests/queries/0_stateless/00405_PrettyCompactMonoBlock.sh b/tests/queries/0_stateless/00405_PrettyCompactMonoBlock.sh index a353ce5ea5c..fb89199acb1 100755 --- a/tests/queries/0_stateless/00405_PrettyCompactMonoBlock.sh +++ b/tests/queries/0_stateless/00405_PrettyCompactMonoBlock.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'one block' diff --git a/tests/queries/0_stateless/00407_parsing_nulls.sh b/tests/queries/0_stateless/00407_parsing_nulls.sh index 8618d4da8de..cc627dbb97c 100755 --- a/tests/queries/0_stateless/00407_parsing_nulls.sh +++ b/tests/queries/0_stateless/00407_parsing_nulls.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo -ne '\\tHello\t123\t\\N\n\\N\t\t2000-01-01 00:00:00\n' | ${CLICKHOUSE_LOCAL} --input-format=TabSeparated --output-format=TabSeparated --structure='s Nullable(String), x Nullable(UInt64), t Nullable(DateTime)' --query="SELECT * FROM table" diff --git a/tests/queries/0_stateless/00408_http_keep_alive.sh b/tests/queries/0_stateless/00408_http_keep_alive.sh index d0c14720cd8..4bd0e494eb8 100755 --- a/tests/queries/0_stateless/00408_http_keep_alive.sh +++ b/tests/queries/0_stateless/00408_http_keep_alive.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh URL="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/" diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh index 17d0c7564e3..d03e02efc55 100755 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison_float.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh index 43d9d550ddf..c2c5491ed90 100755 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int1.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh index 34aaf9ef7ed..539bdd297bd 100755 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int2.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh index 139792944ee..a0e25e8cb0a 100755 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int3.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh index f57099e77ca..b09a05baf69 100755 --- a/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh +++ b/tests/queries/0_stateless/00411_long_accurate_number_comparison_int4.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00415_into_outfile.sh b/tests/queries/0_stateless/00415_into_outfile.sh index 6ceeb7297d6..77dc96a48e6 100755 --- a/tests/queries/0_stateless/00415_into_outfile.sh +++ b/tests/queries/0_stateless/00415_into_outfile.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function perform() diff --git a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh index 31f5e0f0f43..5d9cd12e4bf 100755 --- a/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh +++ b/tests/queries/0_stateless/00416_pocopatch_progress_in_http_headers.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -vsS "${CLICKHOUSE_URL}&max_block_size=5&send_progress_in_http_headers=1&http_headers_progress_interval_ms=0" -d 'SELECT max(number) FROM numbers(10)' 2>&1 | grep -E 'Content-Encoding|X-ClickHouse-Progress|^[0-9]' diff --git a/tests/queries/0_stateless/00417_kill_query.sh b/tests/queries/0_stateless/00417_kill_query.sh index cb08f8c009c..ce4c5851762 100755 --- a/tests/queries/0_stateless/00417_kill_query.sh +++ b/tests/queries/0_stateless/00417_kill_query.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh QUERY_FIELND_NUM=4 diff --git a/tests/queries/0_stateless/00417_system_build_options.sh b/tests/queries/0_stateless/00417_system_build_options.sh index 34f9d4dfc5d..bfdfa7d14ce 100755 --- a/tests/queries/0_stateless/00417_system_build_options.sh +++ b/tests/queries/0_stateless/00417_system_build_options.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="SELECT * FROM system.build_options" | perl -lnE 'print $1 if /(BUILD_DATE|BUILD_TYPE|CXX_COMPILER)\s+\S+/ || /(CXX_FLAGS|LINK_FLAGS|TZDATA_VERSION)/'; diff --git a/tests/queries/0_stateless/00418_input_format_allow_errors.sh b/tests/queries/0_stateless/00418_input_format_allow_errors.sh index 762e35fa8cd..b27c6f3fe29 100755 --- a/tests/queries/0_stateless/00418_input_format_allow_errors.sh +++ b/tests/queries/0_stateless/00418_input_format_allow_errors.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS formats_test" diff --git a/tests/queries/0_stateless/00419_show_sql_queries.sh b/tests/queries/0_stateless/00419_show_sql_queries.sh index a76e9020db0..1737e874ff2 100755 --- a/tests/queries/0_stateless/00419_show_sql_queries.sh +++ b/tests/queries/0_stateless/00419_show_sql_queries.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "SHOW PROCESSLIST" &>/dev/null diff --git a/tests/queries/0_stateless/00421_storage_merge__table_index.sh b/tests/queries/0_stateless/00421_storage_merge__table_index.sh index 769be11f56b..c4af0528f7d 100755 --- a/tests/queries/0_stateless/00421_storage_merge__table_index.sh +++ b/tests/queries/0_stateless/00421_storage_merge__table_index.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh for i in $(seq -w 0 2 20); do diff --git a/tests/queries/0_stateless/00427_alter_primary_key.sh b/tests/queries/0_stateless/00427_alter_primary_key.sh index 352263aad40..4ad1166bfa4 100755 --- a/tests/queries/0_stateless/00427_alter_primary_key.sh +++ b/tests/queries/0_stateless/00427_alter_primary_key.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function perform() diff --git a/tests/queries/0_stateless/00429_long_http_bufferization.sh b/tests/queries/0_stateless/00429_long_http_bufferization.sh index 83c9992218d..aab9aeba937 100755 --- a/tests/queries/0_stateless/00429_long_http_bufferization.sh +++ b/tests/queries/0_stateless/00429_long_http_bufferization.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function query { diff --git a/tests/queries/0_stateless/00430_https_server.sh b/tests/queries/0_stateless/00430_https_server.sh index f562f108791..10df01e2c93 100755 --- a/tests/queries/0_stateless/00430_https_server.sh +++ b/tests/queries/0_stateless/00430_https_server.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # TODO: enable this test with self-signed server cert diff --git a/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh index cc19a608c20..32633a458cc 100755 --- a/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh +++ b/tests/queries/0_stateless/00443_optimize_final_vertical_merge.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh table="optimize_me_finally" diff --git a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh index be986a72457..724630057d9 100755 --- a/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh +++ b/tests/queries/0_stateless/00443_preferred_block_size_bytes.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS preferred_block_size_bytes" diff --git a/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh index 97830b828b6..60de1822318 100755 --- a/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh +++ b/tests/queries/0_stateless/00446_clear_column_in_partition_concurrent_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ch="$CLICKHOUSE_CLIENT --stacktrace -q" diff --git a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh index c49924741df..d3bdf32db74 100755 --- a/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh +++ b/tests/queries/0_stateless/00463_long_sessions_in_http_interface.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh request() { diff --git a/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh index 46f051639d1..47f5c698d17 100755 --- a/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh +++ b/tests/queries/0_stateless/00473_output_format_json_quote_denormals.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="select 1/0, -1/0, sqrt(-1), -sqrt(-1) format JSON" --output_format_json_quote_denormals=0 | grep -o null diff --git a/tests/queries/0_stateless/00474_readonly_settings.sh b/tests/queries/0_stateless/00474_readonly_settings.sh index 04134c24be7..0edde9f12ed 100755 --- a/tests/queries/0_stateless/00474_readonly_settings.sh +++ b/tests/queries/0_stateless/00474_readonly_settings.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="select toUInt64(pow(2, 62)) as value format JSON" --output_format_json_quote_64bit_integers=0 | grep value diff --git a/tests/queries/0_stateless/00485_http_insert_format.sh b/tests/queries/0_stateless/00485_http_insert_format.sh index 358fb09bf67..b5bf36e6c36 100755 --- a/tests/queries/0_stateless/00485_http_insert_format.sh +++ b/tests/queries/0_stateless/00485_http_insert_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS format" diff --git a/tests/queries/0_stateless/00497_whitespaces_in_insert.sh b/tests/queries/0_stateless/00497_whitespaces_in_insert.sh index 6869d79420e..7a799792dd0 100755 --- a/tests/queries/0_stateless/00497_whitespaces_in_insert.sh +++ b/tests/queries/0_stateless/00497_whitespaces_in_insert.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS ws"; diff --git a/tests/queries/0_stateless/00501_http_head.sh b/tests/queries/0_stateless/00501_http_head.sh index 318e55fdcee..60283f26833 100755 --- a/tests/queries/0_stateless/00501_http_head.sh +++ b/tests/queries/0_stateless/00501_http_head.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ( ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=SELECT%201"; diff --git a/tests/queries/0_stateless/00504_insert_miss_columns.sh b/tests/queries/0_stateless/00504_insert_miss_columns.sh index 98d6249c758..ea699ab58a5 100755 --- a/tests/queries/0_stateless/00504_insert_miss_columns.sh +++ b/tests/queries/0_stateless/00504_insert_miss_columns.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # https://github.com/ClickHouse/ClickHouse/issues/1300 diff --git a/tests/queries/0_stateless/00505_secure.sh b/tests/queries/0_stateless/00505_secure.sh index d1968ffe638..3d9e28ba08d 100755 --- a/tests/queries/0_stateless/00505_secure.sh +++ b/tests/queries/0_stateless/00505_secure.sh @@ -3,6 +3,7 @@ # set -x CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Not default server config needed diff --git a/tests/queries/0_stateless/00505_shard_secure.sh b/tests/queries/0_stateless/00505_shard_secure.sh index c9200f8804c..526176f8c39 100755 --- a/tests/queries/0_stateless/00505_shard_secure.sh +++ b/tests/queries/0_stateless/00505_shard_secure.sh @@ -3,6 +3,7 @@ # set -x CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "SELECT * FROM remoteSecure('127.0.0.{1,2}', system.one);" diff --git a/tests/queries/0_stateless/00507_array_no_params.sh b/tests/queries/0_stateless/00507_array_no_params.sh index 6ae0c1c2067..63829b30f76 100755 --- a/tests/queries/0_stateless/00507_array_no_params.sh +++ b/tests/queries/0_stateless/00507_array_no_params.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS foo;" diff --git a/tests/queries/0_stateless/00512_fractional_time_zones.sh b/tests/queries/0_stateless/00512_fractional_time_zones.sh index 748c4584d06..45be8fe8d17 100755 --- a/tests/queries/0_stateless/00512_fractional_time_zones.sh +++ b/tests/queries/0_stateless/00512_fractional_time_zones.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh TZ=Europe/Moscow ${CLICKHOUSE_LOCAL} --query="SELECT toDateTime('1990-10-19 00:00:00')" diff --git a/tests/queries/0_stateless/00520_http_nullable.sh b/tests/queries/0_stateless/00520_http_nullable.sh index 0f068d5a6fe..6dff8109b9f 100755 --- a/tests/queries/0_stateless/00520_http_nullable.sh +++ b/tests/queries/0_stateless/00520_http_nullable.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d 'SELECT floor(NULL), 1;'; diff --git a/tests/queries/0_stateless/00531_client_ignore_error.sh b/tests/queries/0_stateless/00531_client_ignore_error.sh index 98cb297e237..daf636b0765 100755 --- a/tests/queries/0_stateless/00531_client_ignore_error.sh +++ b/tests/queries/0_stateless/00531_client_ignore_error.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null diff --git a/tests/queries/0_stateless/00534_client_ignore_error.sh b/tests/queries/0_stateless/00534_client_ignore_error.sh index 98cb297e237..daf636b0765 100755 --- a/tests/queries/0_stateless/00534_client_ignore_error.sh +++ b/tests/queries/0_stateless/00534_client_ignore_error.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo "SELECT 1; SELECT 2; SELECT CAST(); SELECT ';'; SELECT 3;SELECT CAST();SELECT 4;" | $CLICKHOUSE_CLIENT -n --ignore-error 2>/dev/null diff --git a/tests/queries/0_stateless/00534_filimonov.sh b/tests/queries/0_stateless/00534_filimonov.sh index 044b5c9e65d..a629c0f3a92 100755 --- a/tests/queries/0_stateless/00534_filimonov.sh +++ b/tests/queries/0_stateless/00534_filimonov.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Server should not crash on any function trash calls diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments1.sh b/tests/queries/0_stateless/00534_functions_bad_arguments1.sh index 15bbdb1e4c7..2979e18e2a5 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments1.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments1.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . 
"$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_;' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments10.sh b/tests/queries/0_stateless/00534_functions_bad_arguments10.sh index 8d151d5573d..178c9dd8d94 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments10.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments10.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([NULL],[NULL]);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments11.sh b/tests/queries/0_stateless/00534_functions_bad_arguments11.sh index ac02a9e0a38..3004c6c84ff 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments11.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments11.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_(NULL, NULL, NULL);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments12.sh b/tests/queries/0_stateless/00534_functions_bad_arguments12.sh index 7d8ba7e78f6..8e5bd15dc80 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments12.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments12.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . 
"$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([], [], []);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments13.sh b/tests/queries/0_stateless/00534_functions_bad_arguments13.sh index 3995ddefc15..37f4282ae79 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments13.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments13.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([NULL], [NULL], [NULL]);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments2.sh b/tests/queries/0_stateless/00534_functions_bad_arguments2.sh index 23b03a8d168..9c8eda6fbcb 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments2.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments2.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_();' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments3.sh b/tests/queries/0_stateless/00534_functions_bad_arguments3.sh index 316456045cd..640467e9d00 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments3.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments3.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . 
"$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_(NULL);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments4.sh b/tests/queries/0_stateless/00534_functions_bad_arguments4.sh index 10fce23049b..b0381295b39 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments4.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments4.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([]);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments5.sh b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh index fbed81b41d3..3e1d4be7af5 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments5.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments5.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([NULL]);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments6.sh b/tests/queries/0_stateless/00534_functions_bad_arguments6.sh index 3e1e1d38649..53dff77a390 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments6.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments6.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . 
"$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_(-1);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments7.sh b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh index ac024b4cb94..3625b65c381 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments7.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments7.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant "SELECT \$_('');" diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments8.sh b/tests/queries/0_stateless/00534_functions_bad_arguments8.sh index 31e1eea52a4..35cb25e6f8e 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments8.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments8.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . "$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_(NULL, NULL);' diff --git a/tests/queries/0_stateless/00534_functions_bad_arguments9.sh b/tests/queries/0_stateless/00534_functions_bad_arguments9.sh index ca4fd0bb5ba..1756e968271 100755 --- a/tests/queries/0_stateless/00534_functions_bad_arguments9.sh +++ b/tests/queries/0_stateless/00534_functions_bad_arguments9.sh @@ -2,8 +2,10 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./00534_functions_bad_arguments.lib . 
"$CURDIR"/00534_functions_bad_arguments.lib test_variant 'SELECT $_([], []);' diff --git a/tests/queries/0_stateless/00540_bad_data_types.sh b/tests/queries/0_stateless/00540_bad_data_types.sh index 73245e94a52..400d1476ac4 100755 --- a/tests/queries/0_stateless/00540_bad_data_types.sh +++ b/tests/queries/0_stateless/00540_bad_data_types.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="SELECT CAST(0 AS Array)" 2>/dev/null || true; diff --git a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh index eeae4f6fc1c..560b97a1d1b 100755 --- a/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh +++ b/tests/queries/0_stateless/00543_access_to_temporary_table_in_readonly_mode.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n --query=" diff --git a/tests/queries/0_stateless/00550_join_insert_select.sh b/tests/queries/0_stateless/00550_join_insert_select.sh index 8e60ddebf8f..bfaccb613ca 100755 --- a/tests/queries/0_stateless/00550_join_insert_select.sh +++ b/tests/queries/0_stateless/00550_join_insert_select.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n --ignore-error --query=" diff --git a/tests/queries/0_stateless/00557_remote_port.sh b/tests/queries/0_stateless/00557_remote_port.sh index abb282ca123..4b6e715438e 100755 --- a/tests/queries/0_stateless/00557_remote_port.sh +++ b/tests/queries/0_stateless/00557_remote_port.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh if [ "$CLICKHOUSE_HOST" == "localhost" ]; then diff --git a/tests/queries/0_stateless/00564_enum_order.sh b/tests/queries/0_stateless/00564_enum_order.sh index dc47b3c3773..f50b1c39590 100755 --- a/tests/queries/0_stateless/00564_enum_order.sh +++ b/tests/queries/0_stateless/00564_enum_order.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "DROP TABLE IF EXISTS enum"; diff --git a/tests/queries/0_stateless/00565_enum_order.sh b/tests/queries/0_stateless/00565_enum_order.sh index 2851bcaaca2..6958a403246 100755 --- a/tests/queries/0_stateless/00565_enum_order.sh +++ b/tests/queries/0_stateless/00565_enum_order.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e -o pipefail diff --git a/tests/queries/0_stateless/00574_empty_strings_deserialization.sh b/tests/queries/0_stateless/00574_empty_strings_deserialization.sh index 95b3c2d9e5f..1cbc9456ab0 100755 --- a/tests/queries/0_stateless/00574_empty_strings_deserialization.sh +++ b/tests/queries/0_stateless/00574_empty_strings_deserialization.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS empty_strings_deserialization" diff --git a/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh index a526fe00e1b..94a9d331b6a 100755 --- a/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh +++ b/tests/queries/0_stateless/00575_illegal_column_exception_when_drop_depen_column.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00595_insert_into_view.sh b/tests/queries/0_stateless/00595_insert_into_view.sh index ad16b6147d7..16cc843f9b7 100755 --- a/tests/queries/0_stateless/00595_insert_into_view.sh +++ b/tests/queries/0_stateless/00595_insert_into_view.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh exception_pattern="Code: 48.*Method write is not supported by storage View" diff --git a/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh b/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh index 0c4430f3705..ef0436e55f2 100755 --- a/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh +++ b/tests/queries/0_stateless/00596_limit_on_expanded_ast.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh exception_pattern="too big" diff --git a/tests/queries/0_stateless/00598_create_as_select_http.sh b/tests/queries/0_stateless/00598_create_as_select_http.sh index 58c9593919e..34a70efacd0 100755 --- a/tests/queries/0_stateless/00598_create_as_select_http.sh +++ b/tests/queries/0_stateless/00598_create_as_select_http.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e -o pipefail diff --git a/tests/queries/0_stateless/00600_replace_running_query.sh b/tests/queries/0_stateless/00600_replace_running_query.sh index ea70866538d..be5523e06ea 100755 --- a/tests/queries/0_stateless/00600_replace_running_query.sh +++ b/tests/queries/0_stateless/00600_replace_running_query.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00601_kill_running_query.sh b/tests/queries/0_stateless/00601_kill_running_query.sh index 9a6a3a476a4..e4e3ee98877 100755 --- a/tests/queries/0_stateless/00601_kill_running_query.sh +++ b/tests/queries/0_stateless/00601_kill_running_query.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e -o pipefail diff --git a/tests/queries/0_stateless/00602_throw_if.sh b/tests/queries/0_stateless/00602_throw_if.sh index 5a133ce57a7..fe8feab0303 100755 --- a/tests/queries/0_stateless/00602_throw_if.sh +++ b/tests/queries/0_stateless/00602_throw_if.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh default_exception_message="Value passed to 'throwIf' function is non zero" diff --git a/tests/queries/0_stateless/00612_http_max_query_size.sh b/tests/queries/0_stateless/00612_http_max_query_size.sh index 78ae4eba1dc..0a37d7ec766 100755 --- a/tests/queries/0_stateless/00612_http_max_query_size.sh +++ b/tests/queries/0_stateless/00612_http_max_query_size.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2028 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'select 1' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}&max_query_size=8" -d @- 2>&1 | grep -o "Max query size exceeded" diff --git a/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh b/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh index 59617c81db9..99813d894ae 100755 --- a/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh +++ b/tests/queries/0_stateless/00612_pk_in_tuple_perf.sh @@ -2,6 +2,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh index f4e88b21b75..59092ff28f2 100755 --- a/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh +++ b/tests/queries/0_stateless/00623_truncate_table_throw_exception.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_truncate;" diff --git a/tests/queries/0_stateless/00625_query_in_form_data.sh b/tests/queries/0_stateless/00625_query_in_form_data.sh index 035f4a7ee21..38fb166d846 100755 --- a/tests/queries/0_stateless/00625_query_in_form_data.sh +++ b/tests/queries/0_stateless/00625_query_in_form_data.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} "${CLICKHOUSE_URL}&query=select" -X POST --form-string 'query= 1;' 2>/dev/null diff --git a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh index 0c56a02b894..8945fc3e56b 100755 --- a/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh +++ b/tests/queries/0_stateless/00626_replace_partition_from_table_zookeeper.sh @@ -7,6 +7,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function query_with_retry diff --git a/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh index cb3f70367cf..978b7bdb671 100755 --- a/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh +++ b/tests/queries/0_stateless/00630_arbitrary_csv_delimiter.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv"; diff --git a/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh index a9be70be5b9..817da08bfa0 100755 --- a/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh +++ b/tests/queries/0_stateless/00633_materialized_view_and_too_many_parts_zookeeper.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS root" diff --git a/tests/queries/0_stateless/00634_logging_shard.sh b/tests/queries/0_stateless/00634_logging_shard.sh index 821ae363218..ab210e5a373 100755 --- a/tests/queries/0_stateless/00634_logging_shard.sh +++ b/tests/queries/0_stateless/00634_logging_shard.sh @@ -5,6 +5,7 @@ set -e export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace" CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh cur_name=$(basename "${BASH_SOURCE[0]}") diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index add1f037436..c645bea23b3 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -5,6 +5,7 @@ set -e export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace" CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh cur_name=$(basename "${BASH_SOURCE[0]}") diff --git a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh index 8150d52abc9..fdaecd87f53 100755 --- a/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh +++ b/tests/queries/0_stateless/00636_partition_key_parts_pruning.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="SELECT '*** Single column partition key ***'" diff --git a/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh index 901f3150ce6..014fc696da3 100755 --- a/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh +++ b/tests/queries/0_stateless/00637_sessions_in_http_interface_and_settings.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh user="readonly" diff --git a/tests/queries/0_stateless/00646_url_engine.sh b/tests/queries/0_stateless/00646_url_engine.sh index bf20e0c1222..357e0d74fd0 100755 --- a/tests/queries/0_stateless/00646_url_engine.sh +++ b/tests/queries/0_stateless/00646_url_engine.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh index 8540267a809..3971510f7f6 100755 --- a/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh +++ b/tests/queries/0_stateless/00650_csv_with_specified_quote_rule.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv"; diff --git a/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh index 154ee654a81..23ce8bb9677 100755 --- a/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh +++ b/tests/queries/0_stateless/00651_default_database_on_client_reconnect.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "DROP TABLE IF EXISTS tab_00651; CREATE TABLE tab_00651 (val UInt64) engine = Memory; SHOW CREATE TABLE tab_00651 format abcd; DESC tab_00651; DROP TABLE tab_00651;" ||: 2> /dev/null diff --git a/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh index a49a0c8f500..7c7117d5f75 100755 --- a/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . 
"$CURDIR"/mergetree_mutations.lib ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations" diff --git a/tests/queries/0_stateless/00652_mutations_alter_update.sh b/tests/queries/0_stateless/00652_mutations_alter_update.sh index 83a5e18d4ae..67c024336a0 100755 --- a/tests/queries/0_stateless/00652_mutations_alter_update.sh +++ b/tests/queries/0_stateless/00652_mutations_alter_update.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS alter_update" diff --git a/tests/queries/0_stateless/00652_mutations_default_database.sh b/tests/queries/0_stateless/00652_mutations_default_database.sh index 78aa0e88c36..eed45540f9b 100755 --- a/tests/queries/0_stateless/00652_mutations_default_database.sh +++ b/tests/queries/0_stateless/00652_mutations_default_database.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --multiquery --mutations_sync=1 << EOF diff --git a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh index e40c877e8a2..02f552c250d 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_default_database_zookeeper.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . 
"$CURDIR"/mergetree_mutations.lib ${CLICKHOUSE_CLIENT} --multiquery << EOF diff --git a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh index 9e4bdba1294..08a39c58c3e 100755 --- a/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh +++ b/tests/queries/0_stateless/00652_replicated_mutations_zookeeper.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mutations_r1" diff --git a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh index ff73669bbd0..f49aeb93184 100755 --- a/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh +++ b/tests/queries/0_stateless/00653_verification_monotonic_data_load.sh @@ -11,6 +11,7 @@ #-------------------------------------------- CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS string_test_table;" diff --git a/tests/queries/0_stateless/00682_empty_parts_merge.sh b/tests/queries/0_stateless/00682_empty_parts_merge.sh index 1a336580b3a..0213f31ea94 100755 --- a/tests/queries/0_stateless/00682_empty_parts_merge.sh +++ b/tests/queries/0_stateless/00682_empty_parts_merge.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS ordinary_00682" diff --git a/tests/queries/0_stateless/00686_client_exit_code.sh b/tests/queries/0_stateless/00686_client_exit_code.sh index dea82af2211..eab534dd8d5 100755 --- a/tests/queries/0_stateless/00686_client_exit_code.sh +++ b/tests/queries/0_stateless/00686_client_exit_code.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib echo "INSERT INTO test FORMAT CSV" | ${CLICKHOUSE_CLIENT} -n 2>/dev/null diff --git a/tests/queries/0_stateless/00687_top_and_offset.sh b/tests/queries/0_stateless/00687_top_and_offset.sh index fb7dc2b4d90..4355f746ac8 100755 --- a/tests/queries/0_stateless/00687_top_and_offset.sh +++ b/tests/queries/0_stateless/00687_top_and_offset.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh index 290bde5b4ac..3f22726877a 100755 --- a/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh +++ b/tests/queries/0_stateless/00690_insert_select_converting_exception_message.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_00690;" diff --git a/tests/queries/0_stateless/00699_materialized_view_mutations.sh b/tests/queries/0_stateless/00699_materialized_view_mutations.sh index a8166ca29c0..f55b8ac10ed 100755 --- a/tests/queries/0_stateless/00699_materialized_view_mutations.sh +++ b/tests/queries/0_stateless/00699_materialized_view_mutations.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh index e406b603717..bdb4627ae30 100755 --- a/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh +++ b/tests/queries/0_stateless/00704_drop_truncate_memory_table.sh @@ -4,6 +4,7 @@ set -e CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --multiquery --query=" diff --git a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh index c817767c0b5..ede490cf8f1 100755 --- a/tests/queries/0_stateless/00705_drop_create_merge_tree.sh +++ b/tests/queries/0_stateless/00705_drop_create_merge_tree.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh function stress() diff --git a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh index 8a8a9ed6615..54b6c80f2ac 100755 --- a/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh +++ b/tests/queries/0_stateless/00715_fetch_merged_or_mutated_part_zookeeper.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib diff --git a/tests/queries/0_stateless/00715_json_each_row_input_nested.sh b/tests/queries/0_stateless/00715_json_each_row_input_nested.sh index c38f5b2ba28..72e01aef742 100755 --- a/tests/queries/0_stateless/00715_json_each_row_input_nested.sh +++ b/tests/queries/0_stateless/00715_json_each_row_input_nested.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS json_each_row_nested" diff --git a/tests/queries/0_stateless/00719_insert_block_without_column.sh b/tests/queries/0_stateless/00719_insert_block_without_column.sh index 384445b1ae6..11aaab98cdc 100755 --- a/tests/queries/0_stateless/00719_insert_block_without_column.sh +++ b/tests/queries/0_stateless/00719_insert_block_without_column.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh [ -e "${CLICKHOUSE_TMP}"/test_squashing_block_without_column.out ] && rm "${CLICKHOUSE_TMP}"/test_squashing_block_without_column.out diff --git a/tests/queries/0_stateless/00719_parallel_ddl_db.sh b/tests/queries/0_stateless/00719_parallel_ddl_db.sh index c9f86fa0a41..5608a57eecc 100755 --- a/tests/queries/0_stateless/00719_parallel_ddl_db.sh +++ b/tests/queries/0_stateless/00719_parallel_ddl_db.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS parallel_ddl" diff --git a/tests/queries/0_stateless/00719_parallel_ddl_table.sh b/tests/queries/0_stateless/00719_parallel_ddl_table.sh index c06b6f6cb12..2a542ea21f6 100755 --- a/tests/queries/0_stateless/00719_parallel_ddl_table.sh +++ b/tests/queries/0_stateless/00719_parallel_ddl_table.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS parallel_ddl" diff --git a/tests/queries/0_stateless/00728_json_each_row_parsing.sh b/tests/queries/0_stateless/00728_json_each_row_parsing.sh index 73ea72a5e2c..6a43fc2d8da 100755 --- a/tests/queries/0_stateless/00728_json_each_row_parsing.sh +++ b/tests/queries/0_stateless/00728_json_each_row_parsing.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS json_parse;" diff --git a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh index d4cbc753aee..25a742a481a 100755 --- a/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh +++ b/tests/queries/0_stateless/00731_long_merge_tree_select_opened_files.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh settings="--log_queries=1 --log_query_threads=1 --log_profile_events=1 --log_query_settings=1" diff --git a/tests/queries/0_stateless/00738_lock_for_inner_table.sh b/tests/queries/0_stateless/00738_lock_for_inner_table.sh index 4570c853f31..9540d566ac3 100755 --- a/tests/queries/0_stateless/00738_lock_for_inner_table.sh +++ b/tests/queries/0_stateless/00738_lock_for_inner_table.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo "DROP TABLE IF EXISTS tab_00738; diff --git a/tests/queries/0_stateless/00746_sql_fuzzy.sh b/tests/queries/0_stateless/00746_sql_fuzzy.sh index aed00c905d7..9fa64d10057 100755 --- a/tests/queries/0_stateless/00746_sql_fuzzy.sh +++ b/tests/queries/0_stateless/00746_sql_fuzzy.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh export SQL_FUZZY_FILE_FUNCTIONS=${CLICKHOUSE_TMP}/clickhouse-functions diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh index d3aa04c9095..55b74e055db 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS mergetree_00754;" diff --git a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh index 5cdc150dace..ecf75352ca9 100755 --- a/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh +++ b/tests/queries/0_stateless/00754_distributed_optimize_skip_select_on_unused_shards_with_prewhere.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS distributed_00754;" diff --git a/tests/queries/0_stateless/00763_lock_buffer.sh b/tests/queries/0_stateless/00763_lock_buffer.sh index 7e1a1e87917..44660035208 100755 --- a/tests/queries/0_stateless/00763_lock_buffer.sh +++ b/tests/queries/0_stateless/00763_lock_buffer.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mt_00763_2" diff --git a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh index 1605689fd11..65dd2474580 100755 --- a/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh +++ b/tests/queries/0_stateless/00763_long_lock_buffer_alter_destination_table.sh @@ -4,6 +4,7 @@ set -e CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS mt_00763_1" diff --git a/tests/queries/0_stateless/00764_max_query_size_allocation.sh b/tests/queries/0_stateless/00764_max_query_size_allocation.sh index f074077b6ea..e42a5bd9fb9 100755 --- a/tests/queries/0_stateless/00764_max_query_size_allocation.sh +++ b/tests/queries/0_stateless/00764_max_query_size_allocation.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&max_query_size=1000000000&max_memory_usage=10000000" -d "SELECT 1" diff --git a/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh b/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh index 7f4df58d764..63b687d072d 100755 --- a/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh +++ b/tests/queries/0_stateless/00816_long_concurrent_alter_column.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo "DROP TABLE IF EXISTS concurrent_alter_column" | ${CLICKHOUSE_CLIENT} diff --git a/tests/queries/0_stateless/00823_capnproto_input.sh b/tests/queries/0_stateless/00823_capnproto_input.sh index a53d54b11d8..b86c8882bbd 100755 --- a/tests/queries/0_stateless/00823_capnproto_input.sh +++ b/tests/queries/0_stateless/00823_capnproto_input.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh #create the schema file diff --git a/tests/queries/0_stateless/00825_http_header_query_id.sh b/tests/queries/0_stateless/00825_http_header_query_id.sh index 2b0f199baf2..aad7c038bce 100755 --- a/tests/queries/0_stateless/00825_http_header_query_id.sh +++ b/tests/queries/0_stateless/00825_http_header_query_id.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL_COMMAND} -q -I -sSg "${CLICKHOUSE_URL}&query=SELECT%201" | grep -o X-ClickHouse-Query-Id diff --git a/tests/queries/0_stateless/00825_protobuf_format_input.sh b/tests/queries/0_stateless/00825_protobuf_format_input.sh index b9912b2b849..5a85a852cb1 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_input.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_input.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -eo pipefail @@ -43,6 +44,7 @@ EOF # To generate the file 00825_protobuf_format_input.insh use the following commands: # ninja ProtobufDelimitedMessagesSerializer # build/utils/test-data-generator/ProtobufDelimitedMessagesSerializer +# shellcheck source=./00825_protobuf_format_input.insh source "$CURDIR"/00825_protobuf_format_input.insh $CLICKHOUSE_CLIENT --query "SELECT * FROM in_persons_00825 ORDER BY uuid;" @@ -51,6 +53,7 @@ $CLICKHOUSE_CLIENT --query "SELECT * FROM in_squares_00825 ORDER BY number;" $CLICKHOUSE_CLIENT --query "TRUNCATE TABLE in_persons_00825;" $CLICKHOUSE_CLIENT --query "TRUNCATE TABLE in_squares_00825;" +# shellcheck source=./00825_protobuf_format_input_single.insh source "$CURDIR"/00825_protobuf_format_input_single.insh $CLICKHOUSE_CLIENT --query "SELECT * FROM in_persons_00825 ORDER BY uuid;" diff --git a/tests/queries/0_stateless/00825_protobuf_format_output.sh b/tests/queries/0_stateless/00825_protobuf_format_output.sh index 889d3a9d2ae..f2d0c60b393 100755 --- a/tests/queries/0_stateless/00825_protobuf_format_output.sh +++ b/tests/queries/0_stateless/00825_protobuf_format_output.sh @@ -5,6 +5,7 @@ # build/utils/test-data-generator/ProtobufDelimitedMessagesSerializer CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e -o pipefail diff --git a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh index 0088effcf34..3da40f16786 100755 --- a/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh +++ b/tests/queries/0_stateless/00834_cancel_http_readonly_queries_on_client_close.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} --max-time 1 -sS "${CLICKHOUSE_URL}&query_id=cancel_http_readonly_queries_on_client_close&cancel_http_readonly_queries_on_client_close=1&query=SELECT+count()+FROM+system.numbers" 2>&1 | grep -cF 'curl: (28)' diff --git a/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh index 421d898eb83..be57757af06 100755 --- a/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh +++ b/tests/queries/0_stateless/00834_dont_allow_to_set_two_configuration_files_in_client.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh OUTPUT=$($CLICKHOUSE_CLIENT_BINARY -c 1 -C 2 2>&1) diff --git a/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh b/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh index e00590692ba..6640e0003cd 100755 --- a/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh +++ b/tests/queries/0_stateless/00834_hints_for_type_function_typos.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "select c23ount(*) from system.functions;" 2>&1 | grep "Maybe you meant: \['count'" &>/dev/null; diff --git a/tests/queries/0_stateless/00834_kill_mutation.sh b/tests/queries/0_stateless/00834_kill_mutation.sh index 886433e7ba8..a17d85cf9a5 100755 --- a/tests/queries/0_stateless/00834_kill_mutation.sh +++ b/tests/queries/0_stateless/00834_kill_mutation.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . 
"$CURDIR"/mergetree_mutations.lib ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation" diff --git a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh index 8414db1fee5..d1f938f73fe 100755 --- a/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00834_kill_mutation_replicated_zookeeper.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS kill_mutation_r1" diff --git a/tests/queries/0_stateless/00837_minmax_index.sh b/tests/queries/0_stateless/00837_minmax_index.sh index 9f947db6b6f..39e39f9d628 100755 --- a/tests/queries/0_stateless/00837_minmax_index.sh +++ b/tests/queries/0_stateless/00837_minmax_index.sh @@ -2,6 +2,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" diff --git a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh index c64e1b41ca4..2e070d685a4 100755 --- a/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh +++ b/tests/queries/0_stateless/00838_system_tables_drop_table_race.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS table" diff --git a/tests/queries/0_stateless/00838_unique_index.sh b/tests/queries/0_stateless/00838_unique_index.sh index 330eb4ed346..36504b754a7 100755 --- a/tests/queries/0_stateless/00838_unique_index.sh +++ b/tests/queries/0_stateless/00838_unique_index.sh @@ -2,6 +2,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" diff --git a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh index 481bcc043d7..60a2d8eb9a0 100755 --- a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh +++ b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh for _ in {1..200}; do echo "drop table if exists view_00840" | $CLICKHOUSE_CLIENT; echo "create view view_00840 as select count(*),database,table from system.columns group by database,table" | $CLICKHOUSE_CLIENT; done & diff --git a/tests/queries/0_stateless/00851_http_insert_json_defaults.sh b/tests/queries/0_stateless/00851_http_insert_json_defaults.sh index 08218cec5b9..08458822cc0 100755 --- a/tests/queries/0_stateless/00851_http_insert_json_defaults.sh +++ b/tests/queries/0_stateless/00851_http_insert_json_defaults.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS defaults" diff --git a/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh index c75b752576d..0eeabde917c 100755 --- a/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh +++ b/tests/queries/0_stateless/00898_parsing_bad_diagnostic_message.sh @@ -3,6 +3,7 @@ # set -x CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh echo -ne '0\t1\t2\t3\t4\t5\t6\t7\t8\t9\t10\ta' | $CLICKHOUSE_LOCAL --structure 'c0 UInt8, c1 UInt8, c2 UInt8, c3 UInt8, c4 UInt8, c5 UInt8, c6 UInt8, c7 UInt8, c8 UInt8, c9 UInt8, c10 UInt8, c11 UInt8' --input-format TSV --query 'SELECT * FROM table' 2>&1 | grep -F 'Column 11' diff --git a/tests/queries/0_stateless/00900_orc_load.sh b/tests/queries/0_stateless/00900_orc_load.sh index 6e08b415397..a0bacff43e5 100755 --- a/tests/queries/0_stateless/00900_orc_load.sh +++ b/tests/queries/0_stateless/00900_orc_load.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh DATA_FILE=$CUR_DIR/data_orc/test.orc diff --git a/tests/queries/0_stateless/00900_parquet.sh b/tests/queries/0_stateless/00900_parquet.sh index 1af893c0b45..4b06001429f 100755 --- a/tests/queries/0_stateless/00900_parquet.sh +++ b/tests/queries/0_stateless/00900_parquet.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh #${CLICKHOUSE_CLIENT} --max_block_size=1 --query="SELECT * FROM system.numbers LIMIT 10 FORMAT Parquet" > ${CLICKHOUSE_TMP}/t1.pq diff --git a/tests/queries/0_stateless/00900_parquet_decimal.sh b/tests/queries/0_stateless/00900_parquet_decimal.sh index e6174a1f3a9..67561f484bd 100755 --- a/tests/queries/0_stateless/00900_parquet_decimal.sh +++ b/tests/queries/0_stateless/00900_parquet_decimal.sh @@ -3,6 +3,7 @@ # set -x CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS decimal;" ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS decimal2;" diff --git a/tests/queries/0_stateless/00900_parquet_load.sh b/tests/queries/0_stateless/00900_parquet_load.sh index 43b738aab83..52213f066e1 100755 --- a/tests/queries/0_stateless/00900_parquet_load.sh +++ b/tests/queries/0_stateless/00900_parquet_load.sh @@ -14,6 +14,7 @@ # set -x CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY") diff --git a/tests/queries/0_stateless/00907_set_index_max_rows.sh b/tests/queries/0_stateless/00907_set_index_max_rows.sh index 8109223db97..f780517934d 100755 --- a/tests/queries/0_stateless/00907_set_index_max_rows.sh +++ b/tests/queries/0_stateless/00907_set_index_max_rows.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" diff --git a/tests/queries/0_stateless/00908_bloom_filter_index.sh b/tests/queries/0_stateless/00908_bloom_filter_index.sh index d989ed0fa6b..a7631db759c 100755 --- a/tests/queries/0_stateless/00908_bloom_filter_index.sh +++ b/tests/queries/0_stateless/00908_bloom_filter_index.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" diff --git a/tests/queries/0_stateless/00908_long_http_insert.sh b/tests/queries/0_stateless/00908_long_http_insert.sh index 06a28bc0820..5e793410412 100755 --- a/tests/queries/0_stateless/00908_long_http_insert.sh +++ b/tests/queries/0_stateless/00908_long_http_insert.sh @@ -2,6 +2,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'DROP TABLE IF EXISTS table_for_insert' | ${CLICKHOUSE_CURL} -sSg "${CLICKHOUSE_URL}" -d @- diff --git a/tests/queries/0_stateless/00909_kill_not_initialized_query.sh b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh index bc2a42bd708..531652a33e7 100755 --- a/tests/queries/0_stateless/00909_kill_not_initialized_query.sh +++ b/tests/queries/0_stateless/00909_kill_not_initialized_query.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00910_client_window_size_detection.sh b/tests/queries/0_stateless/00910_client_window_size_detection.sh index eb0b14b0006..47d913de152 100755 --- a/tests/queries/0_stateless/00910_client_window_size_detection.sh +++ b/tests/queries/0_stateless/00910_client_window_size_detection.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility.sh b/tests/queries/0_stateless/00921_datetime64_compatibility.sh index 3e5de1a552c..564c456bc7f 100755 --- a/tests/queries/0_stateless/00921_datetime64_compatibility.sh +++ b/tests/queries/0_stateless/00921_datetime64_compatibility.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="none" # We should have correct env vars from shell_config.sh to run this test CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # In order to check queries individually (don't stop on the first one that fails): diff --git a/tests/queries/0_stateless/00927_asof_join_other_types.sh b/tests/queries/0_stateless/00927_asof_join_other_types.sh index 13a8e798f38..c002d092b40 100755 --- a/tests/queries/0_stateless/00927_asof_join_other_types.sh +++ b/tests/queries/0_stateless/00927_asof_join_other_types.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh for typename in "UInt32" "UInt64" "Float64" "Float32" "DateTime" "Decimal32(5)" "Decimal64(5)" "Decimal128(5)" "DateTime64(3)" diff --git a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh index c5f5c61b41d..2b9a69d19d4 100755 --- a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh +++ b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00937_template_output_format.sh b/tests/queries/0_stateless/00937_template_output_format.sh index 230bc0c0205..6030887cfba 100755 --- a/tests/queries/0_stateless/00937_template_output_format.sh +++ b/tests/queries/0_stateless/00937_template_output_format.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS template"; diff --git a/tests/queries/0_stateless/00937_test_use_header_csv.sh b/tests/queries/0_stateless/00937_test_use_header_csv.sh index c7c0d1f99c0..8ab37b30b60 100755 --- a/tests/queries/0_stateless/00937_test_use_header_csv.sh +++ b/tests/queries/0_stateless/00937_test_use_header_csv.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS csv" diff --git a/tests/queries/0_stateless/00937_test_use_header_tsv.sh b/tests/queries/0_stateless/00937_test_use_header_tsv.sh index a272e70d32b..aa06f2be3b8 100755 --- a/tests/queries/0_stateless/00937_test_use_header_tsv.sh +++ b/tests/queries/0_stateless/00937_test_use_header_tsv.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2016 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS tsv" diff --git a/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh b/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh index 9df9cce29ce..5c4253e682b 100755 --- a/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh +++ b/tests/queries/0_stateless/00938_fix_rwlock_segfault.sh @@ -3,6 +3,7 @@ # Test fix for issue #5066 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00938_template_input_format.sh b/tests/queries/0_stateless/00938_template_input_format.sh index fde74ad5512..75616b35af0 100755 --- a/tests/queries/0_stateless/00938_template_input_format.sh +++ b/tests/queries/0_stateless/00938_template_input_format.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2016,SC2028 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS template1"; diff --git a/tests/queries/0_stateless/00941_system_columns_race_condition.sh b/tests/queries/0_stateless/00941_system_columns_race_condition.sh index 952c1f78c98..0a3fc7f3b3f 100755 --- a/tests/queries/0_stateless/00941_system_columns_race_condition.sh +++ b/tests/queries/0_stateless/00941_system_columns_race_condition.sh @@ -3,6 +3,7 @@ # Test fix for issue #5066 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00942_dataparts_500.sh b/tests/queries/0_stateless/00942_dataparts_500.sh index 9d6dc06cb75..19cb1138aa8 100755 --- a/tests/queries/0_stateless/00942_dataparts_500.sh +++ b/tests/queries/0_stateless/00942_dataparts_500.sh @@ -3,6 +3,7 @@ # Test fix for issue #5066 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} --include -sS "${CLICKHOUSE_URL_INTERSERVER}?endpoint=DataPartsExchange%3A%2Fclickhouse%2Ftables%2F01-01%2Fvisits%2Freplicas%2Fsome.server.com&part=0&compress=false" 2>&1 | grep -F 'HTTP/1.1 500 Internal Server Error' diff --git a/tests/queries/0_stateless/00942_mutate_index.sh b/tests/queries/0_stateless/00942_mutate_index.sh index df02361af78..4eebdd1147f 100755 --- a/tests/queries/0_stateless/00942_mutate_index.sh +++ b/tests/queries/0_stateless/00942_mutate_index.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" diff --git a/tests/queries/0_stateless/00943_materialize_index.sh b/tests/queries/0_stateless/00943_materialize_index.sh index 92947a76b97..43c9af84672 100755 --- a/tests/queries/0_stateless/00943_materialize_index.sh +++ b/tests/queries/0_stateless/00943_materialize_index.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" diff --git a/tests/queries/0_stateless/00944_clear_index_in_partition.sh b/tests/queries/0_stateless/00944_clear_index_in_partition.sh index 8687e2044f0..1f349cf5946 100755 --- a/tests/queries/0_stateless/00944_clear_index_in_partition.sh +++ b/tests/queries/0_stateless/00944_clear_index_in_partition.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS minmax_idx;" diff --git a/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh index 9cdc3da4bac..bb4a04eb15e 100755 --- a/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh +++ b/tests/queries/0_stateless/00944_create_bloom_filter_index_with_merge_tree.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00948_format_in_with_single_element.sh b/tests/queries/0_stateless/00948_format_in_with_single_element.sh index 03a77ed7f49..0edfb3c2d8b 100755 --- a/tests/queries/0_stateless/00948_format_in_with_single_element.sh +++ b/tests/queries/0_stateless/00948_format_in_with_single_element.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00952_basic_constraints.sh b/tests/queries/0_stateless/00952_basic_constraints.sh index d8d44a9e77d..0a7ba91909c 100755 --- a/tests/queries/0_stateless/00952_basic_constraints.sh +++ b/tests/queries/0_stateless/00952_basic_constraints.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh EXCEPTION_TEXT=violated diff --git a/tests/queries/0_stateless/00952_input_function.sh b/tests/queries/0_stateless/00952_input_function.sh index 4915a469997..54496ba09e0 100755 --- a/tests/queries/0_stateless/00952_input_function.sh +++ b/tests/queries/0_stateless/00952_input_function.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS input_function_table_1" diff --git a/tests/queries/0_stateless/00953_constraints_operations.sh b/tests/queries/0_stateless/00953_constraints_operations.sh index a7880055244..b4335e1fd41 100755 --- a/tests/queries/0_stateless/00953_constraints_operations.sh +++ b/tests/queries/0_stateless/00953_constraints_operations.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh EXCEPTION_TEXT=violated diff --git a/tests/queries/0_stateless/00953_indices_alter_exceptions.sh b/tests/queries/0_stateless/00953_indices_alter_exceptions.sh index 1fee87f5cde..8d4097c0722 100755 --- a/tests/queries/0_stateless/00953_indices_alter_exceptions.sh +++ b/tests/queries/0_stateless/00953_indices_alter_exceptions.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh EXCEPTION_SUCCESS_TEXT=ok diff --git a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh index 3caba663df6..bbc2d957937 100755 --- a/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh +++ b/tests/queries/0_stateless/00953_zookeeper_suetin_deduplication_bug.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/00954_client_prepared_statements.sh b/tests/queries/0_stateless/00954_client_prepared_statements.sh index 1bff57c574e..460b0d44943 100755 --- a/tests/queries/0_stateless/00954_client_prepared_statements.sh +++ b/tests/queries/0_stateless/00954_client_prepared_statements.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS ps"; diff --git a/tests/queries/0_stateless/00955_complex_prepared_statements.sh b/tests/queries/0_stateless/00955_complex_prepared_statements.sh index affb73a1ec3..2096272df91 100755 --- a/tests/queries/0_stateless/00955_complex_prepared_statements.sh +++ b/tests/queries/0_stateless/00955_complex_prepared_statements.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh EXCEPTION_TEXT="Code: 457." diff --git a/tests/queries/0_stateless/00955_test_final_mark_use.sh b/tests/queries/0_stateless/00955_test_final_mark_use.sh index d14c9e49814..3c7db3249b3 100755 --- a/tests/queries/0_stateless/00955_test_final_mark_use.sh +++ b/tests/queries/0_stateless/00955_test_final_mark_use.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query=" diff --git a/tests/queries/0_stateless/00956_http_prepared_statements.sh b/tests/queries/0_stateless/00956_http_prepared_statements.sh index 091ad9a2ebc..80c7aa69ae3 100755 --- a/tests/queries/0_stateless/00956_http_prepared_statements.sh +++ b/tests/queries/0_stateless/00956_http_prepared_statements.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "DROP TABLE IF EXISTS ps"; diff --git a/tests/queries/0_stateless/00956_sensitive_data_masking.sh b/tests/queries/0_stateless/00956_sensitive_data_masking.sh index c2118be444a..799941e94bf 100755 --- a/tests/queries/0_stateless/00956_sensitive_data_masking.sh +++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh @@ -4,6 +4,7 @@ export CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL="trace" CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh cur_name=$(basename "${BASH_SOURCE[0]}") diff --git a/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh b/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh index f877289da62..dc6908659a0 100755 --- a/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh +++ b/tests/queries/0_stateless/00957_format_with_clashed_aliases.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh b/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh index bcc54d1e2d2..decd44da8a3 100755 --- a/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh +++ b/tests/queries/0_stateless/00958_format_of_tuple_array_element.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00959_format_with_different_aliases.sh b/tests/queries/0_stateless/00959_format_with_different_aliases.sh index f1aa43ba917..53b994c27e8 100755 --- a/tests/queries/0_stateless/00959_format_with_different_aliases.sh +++ b/tests/queries/0_stateless/00959_format_with_different_aliases.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh index 80117e2bdec..7697578ea66 100755 --- a/tests/queries/0_stateless/00964_bloom_index_string_functions.sh +++ b/tests/queries/0_stateless/00964_bloom_index_string_functions.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bloom_filter_idx;" diff --git a/tests/queries/0_stateless/00965_logs_level_bugfix.sh b/tests/queries/0_stateless/00965_logs_level_bugfix.sh index 0c618cf1bf0..5666e69656d 100755 --- a/tests/queries/0_stateless/00965_logs_level_bugfix.sh +++ b/tests/queries/0_stateless/00965_logs_level_bugfix.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT_BINARY} --send_logs_level="trace" --query="SELECT 1" 2>&1 | awk '{ print $8 }' | grep "Trace" | head -n 1 diff --git a/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh index 34dd1e5c083..c1590047ffa 100755 --- a/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh +++ b/tests/queries/0_stateless/00965_send_logs_level_concurrent_queries.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh for _ in {1..10}; do diff --git a/tests/queries/0_stateless/00965_set_index_string_functions.sh b/tests/queries/0_stateless/00965_set_index_string_functions.sh index 811d484d5ba..dba33d9abcf 100755 --- a/tests/queries/0_stateless/00965_set_index_string_functions.sh +++ b/tests/queries/0_stateless/00965_set_index_string_functions.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS set_idx;" diff --git a/tests/queries/0_stateless/00971_query_id_in_logs.sh b/tests/queries/0_stateless/00971_query_id_in_logs.sh index 2f9101a91b6..9e927f36a9c 100755 --- a/tests/queries/0_stateless/00971_query_id_in_logs.sh +++ b/tests/queries/0_stateless/00971_query_id_in_logs.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=trace CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh index 2c06168e1af..a817acd88a6 100755 --- a/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh +++ b/tests/queries/0_stateless/00974_primary_key_for_lowCardinality.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS lowString;" diff --git a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh index 73e4de4a926..ab1b32ad90e 100755 --- a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh +++ b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query="SELECT 6103" diff --git a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh index 6e476042731..81c0c563db1 100755 --- a/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/00975_indices_mutation_replicated_zookeeper.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . 
"$CURDIR"/mergetree_mutations.lib $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS indices_mutaions1;" diff --git a/tests/queries/0_stateless/00980_alter_settings_race.sh b/tests/queries/0_stateless/00980_alter_settings_race.sh index 0876d846fc9..004504b1227 100755 --- a/tests/queries/0_stateless/00980_alter_settings_race.sh +++ b/tests/queries/0_stateless/00980_alter_settings_race.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS table_for_concurrent_alter" diff --git a/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh b/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh index 803123d1045..99173062595 100755 --- a/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh +++ b/tests/queries/0_stateless/00981_in_subquery_with_tuple.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS bug"; diff --git a/tests/queries/0_stateless/00984_parser_stack_overflow.sh b/tests/queries/0_stateless/00984_parser_stack_overflow.sh index cf67ab7e2f1..167678db5ec 100755 --- a/tests/queries/0_stateless/00984_parser_stack_overflow.sh +++ b/tests/queries/0_stateless/00984_parser_stack_overflow.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CURL_TIMEOUT=30 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Too deep recursion diff --git a/tests/queries/0_stateless/00990_hasToken.sh b/tests/queries/0_stateless/00990_hasToken.sh index 4b42a570e99..6a1d4ff5ccf 100755 --- a/tests/queries/0_stateless/00990_hasToken.sh +++ b/tests/queries/0_stateless/00990_hasToken.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/00991_system_parts_race_condition.sh b/tests/queries/0_stateless/00991_system_parts_race_condition.sh index 0689641b0ae..55ff4d97149 100755 --- a/tests/queries/0_stateless/00991_system_parts_race_condition.sh +++ b/tests/queries/0_stateless/00991_system_parts_race_condition.sh @@ -5,6 +5,7 @@ # https://github.com/google/sanitizers/issues/950 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh index c3c3a21f96f..613e032f42a 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index 0c13ad2d573..1731148f71f 100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/00995_exception_while_insert.sh b/tests/queries/0_stateless/00995_exception_while_insert.sh index 0c318c727ea..28351078df3 100755 --- a/tests/queries/0_stateless/00995_exception_while_insert.sh +++ b/tests/queries/0_stateless/00995_exception_while_insert.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g') diff --git a/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh b/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh index c4c44e0e963..f20a0d8d3bd 100755 --- a/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh +++ b/tests/queries/0_stateless/01000_unneeded_substitutions_client.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "SELECT '\${}'" diff --git a/tests/queries/0_stateless/01001_rename_merge_race_condition.sh b/tests/queries/0_stateless/01001_rename_merge_race_condition.sh index 8b2cb026187..5aeea34e7c1 100755 --- a/tests/queries/0_stateless/01001_rename_merge_race_condition.sh +++ b/tests/queries/0_stateless/01001_rename_merge_race_condition.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh index f09d74adb3d..d2766a14c24 100755 --- a/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh +++ b/tests/queries/0_stateless/01002_alter_nullable_adaptive_granularity_long.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01003_kill_query_race_condition.sh b/tests/queries/0_stateless/01003_kill_query_race_condition.sh index ec7b87c112b..2d21aabf91a 100755 --- a/tests/queries/0_stateless/01003_kill_query_race_condition.sh +++ b/tests/queries/0_stateless/01003_kill_query_race_condition.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01004_rename_deadlock.sh b/tests/queries/0_stateless/01004_rename_deadlock.sh index ebb6cc42792..aa9e6f8a5bc 100755 --- a/tests/queries/0_stateless/01004_rename_deadlock.sh +++ b/tests/queries/0_stateless/01004_rename_deadlock.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh index 4a6a6e7eb1c..5c2ca22715d 100755 --- a/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh +++ b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh index 5473185af3d..fbd287478b6 100755 --- a/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh +++ b/tests/queries/0_stateless/01006_simpod_empty_part_single_column_write.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +# shellcheck source=./mergetree_mutations.lib . "$CURDIR"/mergetree_mutations.lib diff --git a/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh index ae3c9e62fdd..8773a180822 100755 --- a/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh +++ b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh index 3a933e1fb21..1bf34445dbd 100755 --- a/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh +++ b/tests/queries/0_stateless/01010_low_cardinality_and_native_http.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh index 078b49da940..724caa7f414 100755 --- a/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh +++ b/tests/queries/0_stateless/01013_sync_replica_timeout_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01014_format_custom_separated.sh b/tests/queries/0_stateless/01014_format_custom_separated.sh index 4f98d55a078..42599bcc944 100755 --- a/tests/queries/0_stateless/01014_format_custom_separated.sh +++ b/tests/queries/0_stateless/01014_format_custom_separated.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS custom_separated" diff --git a/tests/queries/0_stateless/01014_lazy_database_basic.sh b/tests/queries/0_stateless/01014_lazy_database_basic.sh index 2712e3d05bb..11d698e764e 100755 --- a/tests/queries/0_stateless/01014_lazy_database_basic.sh +++ b/tests/queries/0_stateless/01014_lazy_database_basic.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -n -q "DROP DATABASE IF EXISTS testlazy" diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh index ef001b0988a..2003effb71b 100755 --- a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh +++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh export CURR_DATABASE="test_lazy_01014_concurrent_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/01015_insert_values_parametrized.sh b/tests/queries/0_stateless/01015_insert_values_parametrized.sh index 896ad97156a..bdc4fe01698 100755 --- a/tests/queries/0_stateless/01015_insert_values_parametrized.sh +++ b/tests/queries/0_stateless/01015_insert_values_parametrized.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS insert_values_parametrized"; diff --git a/tests/queries/0_stateless/01016_input_null_as_default.sh b/tests/queries/0_stateless/01016_input_null_as_default.sh index f31e6591e97..137e25b6a12 100755 --- a/tests/queries/0_stateless/01016_input_null_as_default.sh +++ b/tests/queries/0_stateless/01016_input_null_as_default.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS null_as_default"; diff --git a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh index 992f5614772..d7d0dab71b9 100755 --- a/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh +++ b/tests/queries/0_stateless/01017_mutations_with_nondeterministic_functions_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01017_tsv_empty_as_default.sh b/tests/queries/0_stateless/01017_tsv_empty_as_default.sh index a0404b89801..84faf7600f5 100755 --- a/tests/queries/0_stateless/01017_tsv_empty_as_default.sh +++ b/tests/queries/0_stateless/01017_tsv_empty_as_default.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS empty_as_default"; diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh index 33cde0d5961..37e9fd774d7 100755 --- a/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh +++ b/tests/queries/0_stateless/01018_ddl_dictionaries_bad_queries.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh index 6e47787b33a..bc13e44934a 100755 --- a/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh +++ b/tests/queries/0_stateless/01018_ddl_dictionaries_concurrent_requrests.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh index ced0f269327..5733bbfc6d2 100755 --- a/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh +++ b/tests/queries/0_stateless/01018_insert_multiple_blocks_with_defaults.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS defaults" diff --git a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh index 46f1e82f54c..54a7e940377 100755 --- a/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh +++ b/tests/queries/0_stateless/01019_alter_materialized_view_atomic.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --multiquery <&1 | grep -oP "Maximum parse depth .* exceeded." 
diff --git a/tests/queries/0_stateless/01071_http_header_exception_code.sh b/tests/queries/0_stateless/01071_http_header_exception_code.sh index 9e882bcf542..22444f46cbc 100755 --- a/tests/queries/0_stateless/01071_http_header_exception_code.sh +++ b/tests/queries/0_stateless/01071_http_header_exception_code.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh if [[ $(${CLICKHOUSE_CURL_COMMAND} -q -I "${CLICKHOUSE_URL}&query=BADREQUEST" 2>&1 | grep -c 'X-ClickHouse-Exception-Code: 62') -eq 1 ]]; then diff --git a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh index 4f71ef383c7..2562e701a25 100755 --- a/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh +++ b/tests/queries/0_stateless/01076_cache_dictionary_datarace_exception_ptr.sh @@ -3,6 +3,7 @@ # This is a monkey test used to trigger sanitizers. CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="CREATE DATABASE IF NOT EXISTS dictdb_01076; " diff --git a/tests/queries/0_stateless/01076_json_each_row_array.sh b/tests/queries/0_stateless/01076_json_each_row_array.sh index 32327e57b5e..cbbaafacace 100755 --- a/tests/queries/0_stateless/01076_json_each_row_array.sh +++ b/tests/queries/0_stateless/01076_json_each_row_array.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_LOCAL} --query "SELECT '[' || arrayStringConcat(arrayMap(x -> '{\"id\": 1, \"name\": \"name1\"}', range(1000000)), ',') || ']'" | ${CLICKHOUSE_LOCAL} --query "SELECT count() FROM table" --input-format JSONEachRow --structure 'id UInt32, name String' diff --git a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh index c9e5d7b9447..ca453ee8f0d 100755 --- a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh +++ b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh @@ -10,6 +10,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh REPLICAS=5 diff --git a/tests/queries/0_stateless/01077_mutations_index_consistency.sh b/tests/queries/0_stateless/01077_mutations_index_consistency.sh index aa77906a8ad..129ef0b161c 100755 --- a/tests/queries/0_stateless/01077_mutations_index_consistency.sh +++ b/tests/queries/0_stateless/01077_mutations_index_consistency.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh index 98aa6d6e6e2..1c0206453b7 100755 --- a/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh +++ b/tests/queries/0_stateless/01079_bad_alters_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_bad_alters"; diff --git a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh index d6025652343..b3a5de8f9bc 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_add_drop_column_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh REPLICAS=3 diff --git a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh index 365bc2ae444..d5f0c987e5d 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_detach_table_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh REPLICAS=3 diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh index 777c694e5bd..5b14c5a8543 100755 --- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh +++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh REPLICAS=5 diff --git a/tests/queries/0_stateless/01085_max_distributed_connections.sh b/tests/queries/0_stateless/01085_max_distributed_connections.sh index 7b561604640..c63c671f7fc 100755 --- a/tests/queries/0_stateless/01085_max_distributed_connections.sh +++ b/tests/queries/0_stateless/01085_max_distributed_connections.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh opts=( diff --git a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh index 23d609cfea6..d7eb75e717a 100755 --- a/tests/queries/0_stateless/01085_max_distributed_connections_http.sh +++ b/tests/queries/0_stateless/01085_max_distributed_connections_http.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh query="SELECT sleepEachRow(1) FROM remote('127.{2,3}', system.one)" diff --git a/tests/queries/0_stateless/01085_regexp_input_format.sh b/tests/queries/0_stateless/01085_regexp_input_format.sh index 4359cd5762d..5736d031c08 100755 --- a/tests/queries/0_stateless/01085_regexp_input_format.sh +++ b/tests/queries/0_stateless/01085_regexp_input_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS regexp"; diff --git a/tests/queries/0_stateless/01086_odbc_roundtrip.sh b/tests/queries/0_stateless/01086_odbc_roundtrip.sh index 1ce3e656f42..3c9e5c8aba9 100755 --- a/tests/queries/0_stateless/01086_odbc_roundtrip.sh +++ b/tests/queries/0_stateless/01086_odbc_roundtrip.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh index 58c8c5275b8..c96aed7d3ee 100755 --- a/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh +++ b/tests/queries/0_stateless/01086_regexp_input_format_skip_unmatched.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS regexp"; diff --git a/tests/queries/0_stateless/01088_benchmark_query_id.sh b/tests/queries/0_stateless/01088_benchmark_query_id.sh index 81cfa4dad84..cc3531282dd 100755 --- a/tests/queries/0_stateless/01088_benchmark_query_id.sh +++ b/tests/queries/0_stateless/01088_benchmark_query_id.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh QUERY_ID=$RANDOM diff --git a/tests/queries/0_stateless/01098_msgpack_format.sh b/tests/queries/0_stateless/01098_msgpack_format.sh index 8be55e99213..c7a1a0cff42 100755 --- a/tests/queries/0_stateless/01098_msgpack_format.sh +++ b/tests/queries/0_stateless/01098_msgpack_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS msgpack"; diff --git a/tests/queries/0_stateless/01098_temporary_and_external_tables.sh b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh index 54834107728..bdac3c6fae3 100755 --- a/tests/queries/0_stateless/01098_temporary_and_external_tables.sh +++ b/tests/queries/0_stateless/01098_temporary_and_external_tables.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh url_without_session="https://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTPS}/?" diff --git a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh index 30319e7cfea..1039f8f7d97 100755 --- a/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh +++ b/tests/queries/0_stateless/01103_check_cpu_instructions_at_startup.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # If we run sanitized binary under qemu, it will try to slowly allocate 20 TiB until OOM. diff --git a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh index c48665b6093..287a63f858b 100755 --- a/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh +++ b/tests/queries/0_stateless/01103_optimize_drop_race_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh index 173bf44e1f1..227c4df6785 100755 --- a/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh +++ b/tests/queries/0_stateless/01107_atomic_db_detach_attach.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP DATABASE IF EXISTS test_01107" diff --git a/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh index 05d4b060a8a..36a8c507605 100755 --- a/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh +++ b/tests/queries/0_stateless/01107_tuples_arrays_parsing_exceptions.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "SELECT (1, 2 2)" 2>&1 | grep -o "Syntax error" diff --git a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh index 39240ea9f8d..b0cba465c3d 100755 --- a/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh +++ b/tests/queries/0_stateless/01108_restart_replicas_rename_deadlock_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh for i in $(seq 4); do diff --git a/tests/queries/0_stateless/01114_database_atomic.sh b/tests/queries/0_stateless/01114_database_atomic.sh index 2a3a171b724..55288e1e3f6 100755 --- a/tests/queries/0_stateless/01114_database_atomic.sh +++ b/tests/queries/0_stateless/01114_database_atomic.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01146_clickhouse_local_data.sh b/tests/queries/0_stateless/01146_clickhouse_local_data.sh index 2c98b06942b..7268f051b8b 100755 --- a/tests/queries/0_stateless/01146_clickhouse_local_data.sh +++ b/tests/queries/0_stateless/01146_clickhouse_local_data.sh @@ -2,6 +2,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_LOCAL} --query "create table test engine Log as select 1 a" diff --git a/tests/queries/0_stateless/01150_ddl_guard_rwr.sh b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh index 43804075938..6355f790e23 100755 --- a/tests/queries/0_stateless/01150_ddl_guard_rwr.sh +++ b/tests/queries/0_stateless/01150_ddl_guard_rwr.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP DATABASE IF EXISTS test_01150" diff --git a/tests/queries/0_stateless/01187_set_profile_as_setting.sh b/tests/queries/0_stateless/01187_set_profile_as_setting.sh index 8247ab8870a..db9d095fe92 100755 --- a/tests/queries/0_stateless/01187_set_profile_as_setting.sh +++ b/tests/queries/0_stateless/01187_set_profile_as_setting.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -n -m -q "select value, changed from system.settings where name='readonly';" diff --git a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh index bb84cab8977..90b9baf4ebf 100755 --- a/tests/queries/0_stateless/01192_rename_database_zookeeper.sh +++ b/tests/queries/0_stateless/01192_rename_database_zookeeper.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh # 1. init diff --git a/tests/queries/0_stateless/01193_metadata_loading.sh b/tests/queries/0_stateless/01193_metadata_loading.sh index 0ee583a7265..2e28c2c0165 100755 --- a/tests/queries/0_stateless/01193_metadata_loading.sh +++ b/tests/queries/0_stateless/01193_metadata_loading.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # it is the worst way of making performance test, nevertheless it can detect significant slowdown and some other issues, that usually found by stress test diff --git a/tests/queries/0_stateless/01194_http_query_id.sh b/tests/queries/0_stateless/01194_http_query_id.sh index 5eb766bbe2a..b1ab9eed9db 100755 --- a/tests/queries/0_stateless/01194_http_query_id.sh +++ b/tests/queries/0_stateless/01194_http_query_id.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh url="http://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/?session_id=test_01194" diff --git a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh index e3bd8f901e7..6c64b17f719 100755 --- a/tests/queries/0_stateless/01195_formats_diagnostic_info.sh +++ b/tests/queries/0_stateless/01195_formats_diagnostic_info.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2206 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh PARSER=(${CLICKHOUSE_LOCAL} --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format CSV) @@ -21,7 +22,7 @@ echo '2020-04-21 12:34:56, "Hello", 12345678,1' | "${PARSER[@]}" 2>&1| grep "ERR echo '2020-04-21 12:34:56,,123Hello' | "${PARSER[@]}" 2>&1| grep "ERROR" echo -e '2020-04-21 12:34:56, "Hello", 12345678\n\n\n\n ' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo "OK" -PARSER=(${CLICKHOUSE_LOCAL} --input_format_null_as_default=0 --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV) +PARSER=(${CLICKHOUSE_LOCAL} --input_format_null_as_default 0 --query 'SELECT t, s, d FROM table' --structure 't DateTime, s String, d Decimal64(10)' --input-format TSV) echo -e '2020-04-21 12:34:56\tHello\t12345678' | "${PARSER[@]}" 2>&1| grep "ERROR" || echo -e "\nTSV" echo -e '2020-04-21 12:34:56\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR" echo -e '2020-04-21 12:34:567\tHello\t123456789' | "${PARSER[@]}" 2>&1| grep "ERROR" diff --git a/tests/queries/0_stateless/01196_max_parser_depth.sh b/tests/queries/0_stateless/01196_max_parser_depth.sh index ecbb741121f..ae4851bf0c3 100755 --- a/tests/queries/0_stateless/01196_max_parser_depth.sh +++ b/tests/queries/0_stateless/01196_max_parser_depth.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck 
source=../shell_config.sh . "$CURDIR"/../shell_config.sh { printf "select "; for _ in {1..1000}; do printf "coalesce(null, "; done; printf "1"; for _ in {1..1000}; do printf ")"; done; } > "${CLICKHOUSE_TMP}"/query diff --git a/tests/queries/0_stateless/01198_client_quota_key.sh b/tests/queries/0_stateless/01198_client_quota_key.sh index 698d5b5841f..1a08b33e336 100755 --- a/tests/queries/0_stateless/01198_client_quota_key.sh +++ b/tests/queries/0_stateless/01198_client_quota_key.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --quota_key Hello --query_id test_quota_key --log_queries 1 --multiquery --query "SELECT 1; SYSTEM FLUSH LOGS; SELECT DISTINCT quota_key FROM system.query_log WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query_id = 'test_quota_key'" diff --git a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh index ae0c8324468..5ab0e800d39 100755 --- a/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh +++ b/tests/queries/0_stateless/01213_alter_rename_column_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_rename_replicated" diff --git a/tests/queries/0_stateless/01232_json_as_string_format.sh b/tests/queries/0_stateless/01232_json_as_string_format.sh index dd988052edf..ed8c5d37cac 100755 --- a/tests/queries/0_stateless/01232_json_as_string_format.sh +++ b/tests/queries/0_stateless/01232_json_as_string_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS json_as_string"; diff --git a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh index cc490a1fcd2..e42e68a6589 100755 --- a/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh +++ b/tests/queries/0_stateless/01232_preparing_sets_race_condition.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o errexit diff --git a/tests/queries/0_stateless/01238_http_memory_tracking.sh b/tests/queries/0_stateless/01238_http_memory_tracking.sh index b317a6c109b..90a7611c7c7 100755 --- a/tests/queries/0_stateless/01238_http_memory_tracking.sh +++ b/tests/queries/0_stateless/01238_http_memory_tracking.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o pipefail diff --git a/tests/queries/0_stateless/01249_flush_interactive.sh b/tests/queries/0_stateless/01249_flush_interactive.sh index 6049c70b9df..2af75dbcbe5 100755 --- a/tests/queries/0_stateless/01249_flush_interactive.sh +++ b/tests/queries/0_stateless/01249_flush_interactive.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # A question for curious reader: diff --git a/tests/queries/0_stateless/01258_bom_tsv.sh b/tests/queries/0_stateless/01258_bom_tsv.sh index 085214f4b9d..576f16a246e 100755 --- a/tests/queries/0_stateless/01258_bom_tsv.sh +++ b/tests/queries/0_stateless/01258_bom_tsv.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # BOM can be parsed if TSV format has first column that cannot contain arbitrary binary data (such as integer) diff --git a/tests/queries/0_stateless/01268_procfs_metrics.sh b/tests/queries/0_stateless/01268_procfs_metrics.sh index 0ff3b4518d0..cad9b786667 100755 --- a/tests/queries/0_stateless/01268_procfs_metrics.sh +++ b/tests/queries/0_stateless/01268_procfs_metrics.sh @@ -5,6 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function read_numbers_func() diff --git a/tests/queries/0_stateless/01271_http_code_parse_error.sh b/tests/queries/0_stateless/01271_http_code_parse_error.sh index 887a819edc9..5cc6f83b7be 100755 --- a/tests/queries/0_stateless/01271_http_code_parse_error.sh +++ b/tests/queries/0_stateless/01271_http_code_parse_error.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test" diff --git a/tests/queries/0_stateless/01273_arrow.sh b/tests/queries/0_stateless/01273_arrow.sh index 6d62b2b4e4a..ad8a6f0fdb9 100755 --- a/tests/queries/0_stateless/01273_arrow.sh +++ b/tests/queries/0_stateless/01273_arrow.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01273_arrow_load.sh b/tests/queries/0_stateless/01273_arrow_load.sh index 7f4b88ec8e2..b2ca0e32af1 100755 --- a/tests/queries/0_stateless/01273_arrow_load.sh +++ b/tests/queries/0_stateless/01273_arrow_load.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh CB_DIR=$(dirname "$CLICKHOUSE_CLIENT_BINARY") diff --git a/tests/queries/0_stateless/01273_arrow_stream.sh b/tests/queries/0_stateless/01273_arrow_stream.sh index 3ad89b636e3..af5931a4bce 100755 --- a/tests/queries/0_stateless/01273_arrow_stream.sh +++ b/tests/queries/0_stateless/01273_arrow_stream.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01274_generate_random_nested.sh b/tests/queries/0_stateless/01274_generate_random_nested.sh index 966e1f8da28..a4add05e007 100755 --- a/tests/queries/0_stateless/01274_generate_random_nested.sh +++ b/tests/queries/0_stateless/01274_generate_random_nested.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "$CLICKHOUSE_URL" -d "SELECT * FROM generateRandom('\"ParsedParams.Key1\" Array(String), \"ParsedParams.Key2\" Array(Float64), x String', 1, 10, 2) LIMIT 10" > /dev/null; diff --git a/tests/queries/0_stateless/01278_format_multiple_queries.sh b/tests/queries/0_stateless/01278_format_multiple_queries.sh index 8c06b285b6a..55dca42eaef 100755 --- a/tests/queries/0_stateless/01278_format_multiple_queries.sh +++ b/tests/queries/0_stateless/01278_format_multiple_queries.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh index f933283cddf..80af1b2c17f 100755 --- a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # just in case diff --git a/tests/queries/0_stateless/01279_empty_external_table.sh b/tests/queries/0_stateless/01279_empty_external_table.sh index 16a6b5293a9..43dbcf296e6 100755 --- a/tests/queries/0_stateless/01279_empty_external_table.sh +++ b/tests/queries/0_stateless/01279_empty_external_table.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sh b/tests/queries/0_stateless/01280_ttl_where_group_by.sh index 1d0d627e5bb..5ca79951a46 100755 --- a/tests/queries/0_stateless/01280_ttl_where_group_by.sh +++ b/tests/queries/0_stateless/01280_ttl_where_group_by.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1" diff --git a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh index 5333d0b4b0b..285e2ab8dad 100755 --- a/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh +++ b/tests/queries/0_stateless/01281_group_by_limit_memory_tracking.sh @@ -9,6 +9,7 @@ # - one users' query in background (to avoid reseting max_memory_usage_for_user) CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o pipefail @@ -29,8 +30,8 @@ function execute_group_by() # max_memory_usage_for_user is installed to 0 once there are no more # queries for user. local opts=( - --max_memory_usage_for_user=$((150<<20)) - --max_threads=2 + "--max_memory_usage_for_user="$((150<<20)) + "--max_threads=2" ) execute_null "${opts[@]}" <<<'SELECT uniq(number) FROM numbers_mt(toUInt64(1e6)) GROUP BY number % 5e5' } diff --git a/tests/queries/0_stateless/01285_engine_join_donmikel.sh b/tests/queries/0_stateless/01285_engine_join_donmikel.sh index 2d5c769cf8d..7522ed9924b 100755 --- a/tests/queries/0_stateless/01285_engine_join_donmikel.sh +++ b/tests/queries/0_stateless/01285_engine_join_donmikel.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --multiquery --query " diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh index 8f4885b29c0..560d9472504 100755 --- a/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh +++ b/tests/queries/0_stateless/01293_client_interactive_vertical_multiline.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CURDIR}/01293_client_interactive_vertical_multiline.expect diff --git a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh index 873523e1cac..d1c513e0f83 100755 --- a/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh +++ b/tests/queries/0_stateless/01293_client_interactive_vertical_singleline.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CURDIR}/01293_client_interactive_vertical_singleline.expect diff --git a/tests/queries/0_stateless/01293_optimize_final_force.sh b/tests/queries/0_stateless/01293_optimize_final_force.sh index a03bc6e311d..60d45f87385 100755 --- a/tests/queries/0_stateless/01293_optimize_final_force.sh +++ b/tests/queries/0_stateless/01293_optimize_final_force.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh for _ in {1..100}; do $CLICKHOUSE_CLIENT --multiquery --query " diff --git a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh index d43a9361d71..d8f72c7837d 100755 --- a/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh +++ b/tests/queries/0_stateless/01294_lazy_database_concurrent_recreate_reattach_and_show_tables.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh export CURR_DATABASE="test_lazy_01294_concurrent_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/01300_client_save_history_when_terminated.sh b/tests/queries/0_stateless/01300_client_save_history_when_terminated.sh index 2f7c16259e3..8e0f15e57f0 100755 --- a/tests/queries/0_stateless/01300_client_save_history_when_terminated.sh +++ b/tests/queries/0_stateless/01300_client_save_history_when_terminated.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CURDIR}/01300_client_save_history_when_terminated.expect diff --git a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh index 25dc43f9e62..65bf5de8333 100755 --- a/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01301_aggregate_state_exception_memory_leak.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh function test() diff --git a/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh b/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh index 0afeeef6363..ed8a463cc62 100755 --- a/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh +++ b/tests/queries/0_stateless/01302_aggregate_state_exception_memory_leak.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function test() diff --git a/tests/queries/0_stateless/01304_direct_io.sh b/tests/queries/0_stateless/01304_direct_io.sh index 0126a9525ed..3ba3d020d99 100755 --- a/tests/queries/0_stateless/01304_direct_io.sh +++ b/tests/queries/0_stateless/01304_direct_io.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --multiquery --query " diff --git a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh index e3e702b7b1f..5dd3d2b38d6 100755 --- a/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh +++ b/tests/queries/0_stateless/01305_replica_create_drop_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01306_benchmark_json.sh b/tests/queries/0_stateless/01306_benchmark_json.sh index 51021ee8361..fed085abd96 100755 --- a/tests/queries/0_stateless/01306_benchmark_json.sh +++ b/tests/queries/0_stateless/01306_benchmark_json.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh index 3b29922b8fc..24c6199a94a 100755 --- a/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh +++ b/tests/queries/0_stateless/01307_multiple_leaders_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01307_orc_output_format.sh b/tests/queries/0_stateless/01307_orc_output_format.sh index 9293ff6ad64..26c7db5ad1b 100755 --- a/tests/queries/0_stateless/01307_orc_output_format.sh +++ b/tests/queries/0_stateless/01307_orc_output_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS orc"; diff --git a/tests/queries/0_stateless/01308_orc_output_format_arrays.sh b/tests/queries/0_stateless/01308_orc_output_format_arrays.sh index 66dc04d5a3b..f24f62673ac 100755 --- a/tests/queries/0_stateless/01308_orc_output_format_arrays.sh +++ b/tests/queries/0_stateless/01308_orc_output_format_arrays.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS orc"; diff --git a/tests/queries/0_stateless/01312_skip_empty_params.sh b/tests/queries/0_stateless/01312_skip_empty_params.sh index 1b9333aab33..5920376db86 100755 --- a/tests/queries/0_stateless/01312_skip_empty_params.sh +++ b/tests/queries/0_stateless/01312_skip_empty_params.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01316_create_user_syntax_hilite.sh b/tests/queries/0_stateless/01316_create_user_syntax_hilite.sh index a7b1b0abb00..bf79b7cb7ab 100755 --- a/tests/queries/0_stateless/01316_create_user_syntax_hilite.sh +++ b/tests/queries/0_stateless/01316_create_user_syntax_hilite.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01317_no_password_in_command_line.sh b/tests/queries/0_stateless/01317_no_password_in_command_line.sh index 07ac2b88832..c9886aca31e 100755 --- a/tests/queries/0_stateless/01317_no_password_in_command_line.sh +++ b/tests/queries/0_stateless/01317_no_password_in_command_line.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2009 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01318_alter_add_constraint_format.sh b/tests/queries/0_stateless/01318_alter_add_constraint_format.sh index 7494563564b..931f2051274 100755 --- a/tests/queries/0_stateless/01318_alter_add_constraint_format.sh +++ b/tests/queries/0_stateless/01318_alter_add_constraint_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh index 95a8bcb5bb3..ced668e9849 100755 --- a/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh +++ b/tests/queries/0_stateless/01318_long_unsuccessful_mutation_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS mutation_table" diff --git a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh index cc6a66bd6bc..a15d8c8d2cd 100755 --- a/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh +++ b/tests/queries/0_stateless/01320_create_sync_race_condition_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -e diff --git a/tests/queries/0_stateless/01338_long_select_and_alter.sh b/tests/queries/0_stateless/01338_long_select_and_alter.sh index c4ef1cb7f39..499b62564bd 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" diff --git a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh index 6a852357fa6..d990a8a1c08 100755 --- a/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh +++ b/tests/queries/0_stateless/01338_long_select_and_alter_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS alter_mt" diff --git a/tests/queries/0_stateless/01339_client_unrecognized_option.sh b/tests/queries/0_stateless/01339_client_unrecognized_option.sh index 13c286cd032..f590ed3807e 100755 --- a/tests/queries/0_stateless/01339_client_unrecognized_option.sh +++ b/tests/queries/0_stateless/01339_client_unrecognized_option.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT xyzgarbage 2>&1 | grep -q "Code: 552" && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/01342_query_parameters_alias.sh b/tests/queries/0_stateless/01342_query_parameters_alias.sh index c17425ec7d7..11fbe37dabb 100755 --- a/tests/queries/0_stateless/01342_query_parameters_alias.sh +++ b/tests/queries/0_stateless/01342_query_parameters_alias.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --param_x '\N' --query 'SELECT {x:Nullable(Nothing)} as a' --format TSVWithNamesAndTypes diff --git a/tests/queries/0_stateless/01355_CSV_input_format_allow_errors.sh b/tests/queries/0_stateless/01355_CSV_input_format_allow_errors.sh index 7ae77eb9f0c..a7cc9739f71 100755 --- a/tests/queries/0_stateless/01355_CSV_input_format_allow_errors.sh +++ b/tests/queries/0_stateless/01355_CSV_input_format_allow_errors.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh SAMPLE_FILE="$CURDIR/01355_sample_data.csv" diff --git a/tests/queries/0_stateless/01358_lc_parquet.sh b/tests/queries/0_stateless/01358_lc_parquet.sh index 8732cc4eefd..3c49adc6185 100755 --- a/tests/queries/0_stateless/01358_lc_parquet.sh +++ b/tests/queries/0_stateless/01358_lc_parquet.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "drop table if exists test_lc" diff --git a/tests/queries/0_stateless/01361_fover_remote_num_tries.sh b/tests/queries/0_stateless/01361_fover_remote_num_tries.sh index 4dcf5e69415..5e49b393c7a 100755 --- a/tests/queries/0_stateless/01361_fover_remote_num_tries.sh +++ b/tests/queries/0_stateless/01361_fover_remote_num_tries.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --connections_with_failover_max_tries 10 --query "SELECT hostName() FROM remote('128.1.2.3', default.tmp)" 2>&1 | grep -o -P 'connect timed out|Network is unreachable' | wc -l diff --git a/tests/queries/0_stateless/01370_client_autocomplete_word_break_characters.sh b/tests/queries/0_stateless/01370_client_autocomplete_word_break_characters.sh index 419d6d58e85..aa7c9a94eb0 100755 --- a/tests/queries/0_stateless/01370_client_autocomplete_word_break_characters.sh +++ b/tests/queries/0_stateless/01370_client_autocomplete_word_break_characters.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CURDIR}/01370_client_autocomplete_word_break_characters.expect diff --git a/tests/queries/0_stateless/01375_output_format_tsv_csv_with_names.sh b/tests/queries/0_stateless/01375_output_format_tsv_csv_with_names.sh index de4486a88a5..ad9cc2c53a8 100755 --- a/tests/queries/0_stateless/01375_output_format_tsv_csv_with_names.sh +++ b/tests/queries/0_stateless/01375_output_format_tsv_csv_with_names.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh opts=( diff --git a/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh b/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh index d396981f873..469f7e7008b 100755 --- a/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh +++ b/tests/queries/0_stateless/01375_storage_file_tsv_csv_with_names_write_prefix.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # zero rows diff --git a/tests/queries/0_stateless/01383_log_broken_table.sh b/tests/queries/0_stateless/01383_log_broken_table.sh index 80efa7e3908..37cd6e239e5 100755 --- a/tests/queries/0_stateless/01383_log_broken_table.sh +++ b/tests/queries/0_stateless/01383_log_broken_table.sh @@ -2,6 +2,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01393_benchmark_secure_port.sh b/tests/queries/0_stateless/01393_benchmark_secure_port.sh index 4ec220efa2e..6928041da14 100755 --- a/tests/queries/0_stateless/01393_benchmark_secure_port.sh +++ b/tests/queries/0_stateless/01393_benchmark_secure_port.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_BENCHMARK --secure -i 100 <<< 'SELECT 1' 2>&1 | grep -F 'Queries executed' | tail -n1 diff --git a/tests/queries/0_stateless/01395_limit_more_cases.sh b/tests/queries/0_stateless/01395_limit_more_cases.sh index 61c0b6ee6e0..32c854e53fb 100755 --- a/tests/queries/0_stateless/01395_limit_more_cases.sh +++ b/tests/queries/0_stateless/01395_limit_more_cases.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh SIZE=13 diff --git a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh index 4d68a8a722e..30b2b665658 100755 --- a/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh +++ b/tests/queries/0_stateless/01396_inactive_replica_cleanup_nodes_zookeeper.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh # Check that if we have one inactive replica and a huge number of INSERTs to active replicas, # the number of nodes in ZooKeeper does not grow unbounded. 
diff --git a/tests/queries/0_stateless/01399_http_request_headers.sh b/tests/queries/0_stateless/01399_http_request_headers.sh index d7f1c8df608..9b07f018230 100755 --- a/tests/queries/0_stateless/01399_http_request_headers.sh +++ b/tests/queries/0_stateless/01399_http_request_headers.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}" -H 'X-ClickHouse-User: default' -d 'SELECT 1' diff --git a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh index aa23fcfe7aa..b70c28422c9 100755 --- a/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh +++ b/tests/queries/0_stateless/01401_FORMAT_SETTINGS.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh set -o pipefail diff --git a/tests/queries/0_stateless/01406_carriage_return_in_tsv_csv.sh b/tests/queries/0_stateless/01406_carriage_return_in_tsv_csv.sh index 5a1edb42a06..9c3d7726763 100755 --- a/tests/queries/0_stateless/01406_carriage_return_in_tsv_csv.sh +++ b/tests/queries/0_stateless/01406_carriage_return_in_tsv_csv.sh @@ -2,6 +2,7 @@ # shellcheck disable=SC2028 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo 'CSVWithNames' diff --git a/tests/queries/0_stateless/01412_cache_dictionary_race.sh b/tests/queries/0_stateless/01412_cache_dictionary_race.sh index 68f44e10c50..587cec52932 100755 --- a/tests/queries/0_stateless/01412_cache_dictionary_race.sh +++ b/tests/queries/0_stateless/01412_cache_dictionary_race.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh index cd59c6e46d7..ceeeed41049 100755 --- a/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh +++ b/tests/queries/0_stateless/01414_mutations_and_errors_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS replicated_mutation_table" diff --git a/tests/queries/0_stateless/01415_sticking_mutations.sh b/tests/queries/0_stateless/01415_sticking_mutations.sh index ce34cd09ca3..9bd0a6eeebf 100755 --- a/tests/queries/0_stateless/01415_sticking_mutations.sh +++ b/tests/queries/0_stateless/01415_sticking_mutations.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS sticking_mutations" diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose.sh b/tests/queries/0_stateless/01417_freeze_partition_verbose.sh index f15eb26e4b8..5294f4fe8f1 100755 --- a/tests/queries/0_stateless/01417_freeze_partition_verbose.sh +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh ALTER_OUT_STRUCTURE='command_type String, partition_id String, part_name String' ATTACH_OUT_STRUCTURE='old_part_name String' diff --git a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh index d9011e2acdf..480daeefa46 100755 --- a/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh +++ b/tests/queries/0_stateless/01417_freeze_partition_verbose_zookeeper.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh ALTER_OUT_STRUCTURE='command_type String, partition_id String, part_name String' ATTACH_OUT_STRUCTURE='old_part_name String' diff --git a/tests/queries/0_stateless/01417_query_time_in_system_events.sh b/tests/queries/0_stateless/01417_query_time_in_system_events.sh index ff6d11befb0..4c0701c958e 100755 --- a/tests/queries/0_stateless/01417_query_time_in_system_events.sh +++ b/tests/queries/0_stateless/01417_query_time_in_system_events.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -. $CURDIR/../shell_config.sh +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh DATA_BEFORE=`${CLICKHOUSE_CLIENT} --query="SELECT event,value FROM system.events WHERE event IN ('QueryTimeMicroseconds','SelectQueryTimeMicroseconds','InsertQueryTimeMicroseconds') FORMAT CSV"` diff --git a/tests/queries/0_stateless/01429_empty_arrow_and_parquet.sh b/tests/queries/0_stateless/01429_empty_arrow_and_parquet.sh index 223a5d246ae..5ee8379d431 100755 --- a/tests/queries/0_stateless/01429_empty_arrow_and_parquet.sh +++ b/tests/queries/0_stateless/01429_empty_arrow_and_parquet.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01442_merge_detach_attach.sh b/tests/queries/0_stateless/01442_merge_detach_attach.sh index a0ed8e42357..c99069b4aa2 100755 --- a/tests/queries/0_stateless/01442_merge_detach_attach.sh +++ b/tests/queries/0_stateless/01442_merge_detach_attach.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g') diff --git a/tests/queries/0_stateless/01443_merge_truncate.sh b/tests/queries/0_stateless/01443_merge_truncate.sh index 23c5a8f6c77..ffd5f225ffe 100755 --- a/tests/queries/0_stateless/01443_merge_truncate.sh +++ b/tests/queries/0_stateless/01443_merge_truncate.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g') diff --git a/tests/queries/0_stateless/01444_create_table_drop_database_race.sh b/tests/queries/0_stateless/01444_create_table_drop_database_race.sh index d54731fd7a3..aba83aac207 100755 --- a/tests/queries/0_stateless/01444_create_table_drop_database_race.sh +++ b/tests/queries/0_stateless/01444_create_table_drop_database_race.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh # This test reproduces "Directory not empty" error in DROP DATABASE query. 
diff --git a/tests/queries/0_stateless/01445_create_table_as_table_function.sh b/tests/queries/0_stateless/01445_create_table_as_table_function.sh index 6be015fc8a3..f963c700779 100755 --- a/tests/queries/0_stateless/01445_create_table_as_table_function.sh +++ b/tests/queries/0_stateless/01445_create_table_as_table_function.sh @@ -3,6 +3,7 @@ set -e CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "CREATE TABLE system.columns AS numbers(10);" 2>&1 | grep -F "Code: 57" > /dev/null && echo 'OK' || echo 'FAIL' diff --git a/tests/queries/0_stateless/01446_json_strings_each_row.sh b/tests/queries/0_stateless/01446_json_strings_each_row.sh index af5bac6d2de..a2d98cd7f90 100755 --- a/tests/queries/0_stateless/01446_json_strings_each_row.sh +++ b/tests/queries/0_stateless/01446_json_strings_each_row.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo "DROP TABLE IF EXISTS test_table;" | ${CLICKHOUSE_CLIENT} diff --git a/tests/queries/0_stateless/01451_dist_logs.sh b/tests/queries/0_stateless/01451_dist_logs.sh index d192eb30251..23dee7a827d 100755 --- a/tests/queries/0_stateless/01451_dist_logs.sh +++ b/tests/queries/0_stateless/01451_dist_logs.sh @@ -4,6 +4,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=trace CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # triggered not for the first query diff --git a/tests/queries/0_stateless/01451_wrong_error_long_query.sh b/tests/queries/0_stateless/01451_wrong_error_long_query.sh index 00bfb285aaf..333dab193cd 100755 --- a/tests/queries/0_stateless/01451_wrong_error_long_query.sh +++ b/tests/queries/0_stateless/01451_wrong_error_long_query.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -printf "select 1 in (1, 1, %1048554s (-1))" | ${CLICKHOUSE_CURL} -ss 'http://localhost:8123/?max_query_size=1048576' --data-binary @- | grep -o "Max query size exceeded" -printf "select 1 in (1, 1, %1048554s (-1))" | ${CLICKHOUSE_CURL} -ss 'http://localhost:8123/?max_query_size=1048580' --data-binary @- +printf "select 1 in (1, 1, %1048554s (-1))" " " | ${CLICKHOUSE_CURL} -ss 'http://localhost:8123/?max_query_size=1048576' --data-binary @- | grep -o "Max query size exceeded" +printf "select 1 in (1, 1, %1048554s (-1))" " " | ${CLICKHOUSE_CURL} -ss 'http://localhost:8123/?max_query_size=1048580' --data-binary @- diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh index 8fce5c9065b..49829b288ae 100755 --- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh +++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS mem" diff --git a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh index f81a010cf95..d8a8dde966e 100755 --- a/tests/queries/0_stateless/01455_opentelemetry_distributed.sh +++ b/tests/queries/0_stateless/01455_opentelemetry_distributed.sh @@ -2,6 +2,7 @@ set -ue CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function check_log diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh index 1cf0ed56bc5..0a437d689fa 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh NUM_REPLICAS=10 diff --git a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh index e033e0d5b72..a333d9be8ae 100755 --- a/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh +++ b/tests/queries/0_stateless/01459_manual_write_to_replicas_quorum.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh NUM_REPLICAS=10 diff --git a/tests/queries/0_stateless/01460_line_as_string_format.sh b/tests/queries/0_stateless/01460_line_as_string_format.sh index 4f94111df08..4ab9cb59858 100755 --- a/tests/queries/0_stateless/01460_line_as_string_format.sh +++ b/tests/queries/0_stateless/01460_line_as_string_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS line_as_string1"; @@ -18,14 +19,14 @@ $CLICKHOUSE_CLIENT --query="DROP TABLE line_as_string1" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS line_as_string2"; $CLICKHOUSE_CLIENT --query="create table line_as_string2( - a UInt64 default 42, + a UInt64 default 42, b String materialized toString(a), c String ) engine=MergeTree() order by tuple();"; $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) values ('ClickHouse')"; -echo 'ClickHouse is a `fast` #open-source# (OLAP) 'database' "management" :system:' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) FORMAT LineAsString"; +echo 'ClickHouse is a `fast` #open-source# (OLAP) database "management" :system:' | $CLICKHOUSE_CLIENT --query="INSERT INTO line_as_string2(c) FORMAT LineAsString"; $CLICKHOUSE_CLIENT --query="SELECT * FROM line_as_string2 order by c"; $CLICKHOUSE_CLIENT --query="DROP TABLE line_as_string2" diff --git a/tests/queries/0_stateless/01472_obfuscator_uuid.sh b/tests/queries/0_stateless/01472_obfuscator_uuid.sh index 2efd4986faa..6654dcaad71 100755 --- a/tests/queries/0_stateless/01472_obfuscator_uuid.sh +++ b/tests/queries/0_stateless/01472_obfuscator_uuid.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="CREATE TABLE t_uuid(Id UUID) ENGINE=MergeTree ORDER BY (Id)" diff --git a/tests/queries/0_stateless/01474_custom_null_tsv.sh b/tests/queries/0_stateless/01474_custom_null_tsv.sh index ee9bb7900a0..9dc1c4b7777 100755 --- a/tests/queries/0_stateless/01474_custom_null_tsv.sh +++ b/tests/queries/0_stateless/01474_custom_null_tsv.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS tsv_custom_null"; diff --git a/tests/queries/0_stateless/01500_StorageFile_write_to_fd.sh b/tests/queries/0_stateless/01500_StorageFile_write_to_fd.sh index 589a578eb0b..e6fe5d17cd0 100755 --- a/tests/queries/0_stateless/01500_StorageFile_write_to_fd.sh +++ b/tests/queries/0_stateless/01500_StorageFile_write_to_fd.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # The following command will execute: diff --git a/tests/queries/0_stateless/01501_clickhouse_client_INSERT_exception.sh b/tests/queries/0_stateless/01501_clickhouse_client_INSERT_exception.sh index 4e96f393f30..2abb1818529 100755 --- a/tests/queries/0_stateless/01501_clickhouse_client_INSERT_exception.sh +++ b/tests/queries/0_stateless/01501_clickhouse_client_INSERT_exception.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS data" diff --git a/tests/queries/0_stateless/01502_jemalloc_percpu_arena.sh b/tests/queries/0_stateless/01502_jemalloc_percpu_arena.sh index 06f7d38af94..869e3a1d26d 100755 --- a/tests/queries/0_stateless/01502_jemalloc_percpu_arena.sh +++ b/tests/queries/0_stateless/01502_jemalloc_percpu_arena.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ncpus="$(getconf _NPROCESSORS_ONLN)" diff --git a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh index 96d2e32f590..667a612ff23 100755 --- a/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh +++ b/tests/queries/0_stateless/01502_log_tinylog_deadlock_race.sh @@ -5,6 +5,7 @@ set -e CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01505_pipeline_executor_UAF.sh b/tests/queries/0_stateless/01505_pipeline_executor_UAF.sh index f259badea8c..645eaea743c 100755 --- a/tests/queries/0_stateless/01505_pipeline_executor_UAF.sh +++ b/tests/queries/0_stateless/01505_pipeline_executor_UAF.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Regression for UAF in ThreadPool. diff --git a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh index 945be0a1324..5b8f0cba639 100755 --- a/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh +++ b/tests/queries/0_stateless/01507_clickhouse_server_start_with_embedded_config.sh @@ -4,6 +4,7 @@ CLICKHOUSE_PORT_TCP=50111 CLICKHOUSE_DATABASE=default CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo "Starting clickhouse-server" diff --git a/tests/queries/0_stateless/01508_explain_header.reference b/tests/queries/0_stateless/01508_explain_header.reference index 8cab5be1687..5f9e8cfed84 100644 --- a/tests/queries/0_stateless/01508_explain_header.reference +++ b/tests/queries/0_stateless/01508_explain_header.reference @@ -1,4 +1,4 @@ -Expression (Projection + Before ORDER BY and SELECT) +Expression (Projection + Before ORDER BY) Header: x UInt8 SettingQuotaAndLimits (Set limits and quota after reading from storage) Header: dummy UInt8 diff --git a/tests/queries/0_stateless/01508_format_regexp_raw.sh b/tests/queries/0_stateless/01508_format_regexp_raw.sh index 699fca1be61..8cf1bd73566 100755 --- a/tests/queries/0_stateless/01508_format_regexp_raw.sh +++ b/tests/queries/0_stateless/01508_format_regexp_raw.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -n --query " diff --git a/tests/queries/0_stateless/01508_partition_pruning.sh b/tests/queries/0_stateless/01508_partition_pruning.sh index c886946c7d9..b5ec6388d5c 100755 --- a/tests/queries/0_stateless/01508_partition_pruning.sh +++ b/tests/queries/0_stateless/01508_partition_pruning.sh @@ -10,6 +10,7 @@ #------------------------------------------------------------------------------------------- CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh #export CLICKHOUSE_CLIENT="clickhouse-client --send_logs_level=none" diff --git a/tests/queries/0_stateless/01508_query_obfuscator.sh b/tests/queries/0_stateless/01508_query_obfuscator.sh index d60e42489fa..b354f0953fd 100755 --- a/tests/queries/0_stateless/01508_query_obfuscator.sh +++ b/tests/queries/0_stateless/01508_query_obfuscator.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_FORMAT --seed Hello --obfuscate <<< "SELECT 123, 'Test://2020-01-01hello1234 at 2000-01-01T01:02:03', 12e100, Gibberish_id_testCool, hello(World), avgIf(remote('127.0.0.1'))" diff --git a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh index 2af1cb214a4..4cb4734b448 100755 --- a/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh +++ b/tests/queries/0_stateless/01508_race_condition_rename_clear_zookeeper.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS table_for_renames0" diff --git a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh index 526859bcb58..c5ffad1c4ca 100755 --- a/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh +++ b/tests/queries/0_stateless/01509_check_many_parallel_quorum_inserts.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh NUM_REPLICAS=10 diff --git a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh index eecb06bda5d..898a68d9c77 100755 --- a/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh +++ b/tests/queries/0_stateless/01509_check_parallel_quorum_inserts.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh NUM_REPLICAS=2 diff --git a/tests/queries/0_stateless/01509_format_raw_blob.reference b/tests/queries/0_stateless/01509_format_raw_blob.reference index dfa8f538e67..05014001bd9 100644 --- a/tests/queries/0_stateless/01509_format_raw_blob.reference +++ b/tests/queries/0_stateless/01509_format_raw_blob.reference @@ -1,2 +1,2 @@ -96b229180107fd2d23fd0a2ef9326701 - -96b229180107fd2d23fd0a2ef9326701 - +9fd46251e5574c633cbfbb9293671888 - +9fd46251e5574c633cbfbb9293671888 - diff --git a/tests/queries/0_stateless/01509_format_raw_blob.sh b/tests/queries/0_stateless/01509_format_raw_blob.sh index 68d3844d727..3d1d3fbb17b 100755 --- a/tests/queries/0_stateless/01509_format_raw_blob.sh +++ b/tests/queries/0_stateless/01509_format_raw_blob.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -n --query " diff --git a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh b/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh index 214c39a21cc..ca5f58512a3 100755 --- a/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh +++ b/tests/queries/0_stateless/01509_parallel_quorum_and_merge.sh @@ -3,6 +3,7 @@ set -e CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS parallel_q1" diff --git a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh index 0f65280e1ce..594caca7d04 100755 --- a/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh +++ b/tests/queries/0_stateless/01510_format_regexp_raw_low_cardinality.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -n --query " diff --git a/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh b/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh index b0abd99d38c..726dcc8ee6d 100755 --- a/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh +++ b/tests/queries/0_stateless/01514_distributed_cancel_query_on_error.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # _shard_num: @@ -9,10 +10,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # max_block_size to fail faster # max_memory_usage/_shard_num/repeat() will allow failure on the first shard earlier. opts=( - --max_memory_usage=1G - --max_block_size=50 - --max_threads=1 - --max_distributed_connections=2 + "--max_memory_usage=1G" + "--max_block_size=50" + "--max_threads=1" + "--max_distributed_connections=2" ) ${CLICKHOUSE_CLIENT} "${opts[@]}" -q "SELECT groupArray(repeat('a', if(_shard_num == 2, 100000, 1))), number%100000 k from remote('127.{2,3}', system.numbers) GROUP BY k LIMIT 10e6" |& { # the query should fail earlier on 127.3 and 127.2 should not even go to the memory limit exceeded error. 
diff --git a/tests/queries/0_stateless/01515_logtrace_function.sh b/tests/queries/0_stateless/01515_logtrace_function.sh index c536c90e74b..9953fc2ae2b 100755 --- a/tests/queries/0_stateless/01515_logtrace_function.sh +++ b/tests/queries/0_stateless/01515_logtrace_function.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') diff --git a/tests/queries/0_stateless/01516_drop_table_stress.sh b/tests/queries/0_stateless/01516_drop_table_stress.sh index 3e2fd613a36..d72104c8c7f 100755 --- a/tests/queries/0_stateless/01516_drop_table_stress.sh +++ b/tests/queries/0_stateless/01516_drop_table_stress.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function drop_database() diff --git a/tests/queries/0_stateless/01520_client_print_query_id.sh b/tests/queries/0_stateless/01520_client_print_query_id.sh index 21f60b49924..b32f48239c2 100755 --- a/tests/queries/0_stateless/01520_client_print_query_id.sh +++ b/tests/queries/0_stateless/01520_client_print_query_id.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CURDIR}/01520_client_print_query_id.expect diff --git a/tests/queries/0_stateless/01523_client_local_queries_file_parameter.sh b/tests/queries/0_stateless/01523_client_local_queries_file_parameter.sh index bd8cbc03095..f4681907ced 100755 --- a/tests/queries/0_stateless/01523_client_local_queries_file_parameter.sh +++ b/tests/queries/0_stateless/01523_client_local_queries_file_parameter.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo "SELECT 1;" > 01523_client_local_queries_file_parameter_tmp.sql diff --git a/tests/queries/0_stateless/01526_client_start_and_exit.sh b/tests/queries/0_stateless/01526_client_start_and_exit.sh index d6b7b4f72ed..40927cd7599 100755 --- a/tests/queries/0_stateless/01526_client_start_and_exit.sh +++ b/tests/queries/0_stateless/01526_client_start_and_exit.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Create a huge amount of tables, so Suggest will take a time to load diff --git a/tests/queries/0_stateless/01526_initial_query_id.sh b/tests/queries/0_stateless/01526_initial_query_id.sh index e28f9ee1e40..0cc09f733d0 100755 --- a/tests/queries/0_stateless/01526_initial_query_id.sh +++ b/tests/queries/0_stateless/01526_initial_query_id.sh @@ -2,6 +2,7 @@ set -ue CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh query_id=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))") diff --git a/tests/queries/0_stateless/01526_max_untracked_memory.sh b/tests/queries/0_stateless/01526_max_untracked_memory.sh index 2623d175d82..b11f40b44f4 100755 --- a/tests/queries/0_stateless/01526_max_untracked_memory.sh +++ b/tests/queries/0_stateless/01526_max_untracked_memory.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh query="select randomPrintableASCII(number) from numbers(1000)" diff --git a/tests/queries/0_stateless/01526_param_uuid.sh b/tests/queries/0_stateless/01526_param_uuid.sh index 4f508ddf863..b0c68894fdb 100755 --- a/tests/queries/0_stateless/01526_param_uuid.sh +++ b/tests/queries/0_stateless/01526_param_uuid.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --param_p1='ffffffff-ffff-ffff-ffff-ffffffffffff' --query "SELECT {p1:UUID}" diff --git a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh index bbbdf9c65d6..82453c00ca4 100755 --- a/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh +++ b/tests/queries/0_stateless/01527_clickhouse_local_optimize.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh WORKING_FOLDER_01527="${CLICKHOUSE_TMP}/01527_clickhouse_local_optimize" diff --git a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh index 9b09edfe27a..8684582ad45 100755 --- a/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh +++ b/tests/queries/0_stateless/01528_clickhouse_local_prepare_parts.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh WORKING_FOLDER_01528="${CLICKHOUSE_TMP}/01528_clickhouse_local_prepare_parts" diff --git a/tests/queries/0_stateless/01528_play.sh b/tests/queries/0_stateless/01528_play.sh index 7182f4dd6e5..09cf503676d 100755 --- a/tests/queries/0_stateless/01528_play.sh +++ b/tests/queries/0_stateless/01528_play.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/play" | grep -o '🌞' diff --git a/tests/queries/0_stateless/01529_bad_memory_tracking.sh b/tests/queries/0_stateless/01529_bad_memory_tracking.sh index f91f6ebaf80..5ad2535074a 100755 --- a/tests/queries/0_stateless/01529_bad_memory_tracking.sh +++ b/tests/queries/0_stateless/01529_bad_memory_tracking.sh @@ -3,6 +3,7 @@ CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=fatal CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh for _ in {1..10}; do diff --git a/tests/queries/0_stateless/01529_union_distinct_and_setting_union_default_mode.sql b/tests/queries/0_stateless/01529_union_distinct_and_setting_union_default_mode.sql index e29e43f64ba..6f2fe847f5d 100644 --- a/tests/queries/0_stateless/01529_union_distinct_and_setting_union_default_mode.sql +++ b/tests/queries/0_stateless/01529_union_distinct_and_setting_union_default_mode.sql @@ -1,5 +1,7 @@ SELECT 1; +SET union_default_mode='DISTINCT'; + (((((((SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1; (((((((SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1; diff --git a/tests/queries/0_stateless/01532_clickhouse_local_tmp_folder.sh b/tests/queries/0_stateless/01532_clickhouse_local_tmp_folder.sh index f341fbcdd9b..09f7e023288 100755 --- a/tests/queries/0_stateless/01532_clickhouse_local_tmp_folder.sh +++ b/tests/queries/0_stateless/01532_clickhouse_local_tmp_folder.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # in case when clickhouse-local can't use temp folder it will try to create diff --git a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh index 64a90d871e7..c81bd1a6ce4 100755 --- a/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh +++ b/tests/queries/0_stateless/01541_max_memory_usage_for_user.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # Regression for MemoryTracker drift via HTTP queries. 
diff --git a/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh b/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh index 582a8eb4d12..334fcc87baf 100755 --- a/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh +++ b/tests/queries/0_stateless/01542_dictionary_load_exception_race.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh index 0971396ec9c..f3f636dee73 100755 --- a/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh +++ b/tests/queries/0_stateless/01543_avro_deserialization_with_lc.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "CREATE TABLE IF NOT EXISTS test_01543 (value LowCardinality(String)) ENGINE=Memory()" diff --git a/tests/queries/0_stateless/01544_file_engine_settings.sh b/tests/queries/0_stateless/01544_file_engine_settings.sh index ff6b0d3d373..eb0a8a964d0 100755 --- a/tests/queries/0_stateless/01544_file_engine_settings.sh +++ b/tests/queries/0_stateless/01544_file_engine_settings.sh @@ -2,6 +2,7 @@ set -eu CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh the_file="$CLICKHOUSE_TMP/01544-t.csv" diff --git a/tests/queries/0_stateless/01545_system_errors.sh b/tests/queries/0_stateless/01545_system_errors.sh index 402c4e34116..63af6bb8d43 100755 --- a/tests/queries/0_stateless/01545_system_errors.sh +++ b/tests/queries/0_stateless/01545_system_errors.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh prev="$(${CLICKHOUSE_CLIENT} -q "SELECT value FROM system.errors WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO'")" diff --git a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh index 6c9384e01c1..99e3aed2825 100755 --- a/tests/queries/0_stateless/01548_create_table_compound_column_format.sh +++ b/tests/queries/0_stateless/01548_create_table_compound_column_format.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo "CREATE TABLE test(a Int64, b NESTED(a Int64)) ENGINE=TinyLog" | $CLICKHOUSE_FORMAT diff --git a/tests/queries/0_stateless/01548_parallel_parsing_max_memory.sh b/tests/queries/0_stateless/01548_parallel_parsing_max_memory.sh index 884d5b6e058..d7ee2840763 100755 --- a/tests/queries/0_stateless/01548_parallel_parsing_max_memory.sh +++ b/tests/queries/0_stateless/01548_parallel_parsing_max_memory.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh yes http://foobarfoobarfoobarfoobarfoobarfoobarfoobar.com | head -c1G > 1g.csv diff --git a/tests/queries/0_stateless/01550_query_identifier_parameters.sh b/tests/queries/0_stateless/01550_query_identifier_parameters.sh index 85ca67e4e3c..ece01d83254 100755 --- a/tests/queries/0_stateless/01550_query_identifier_parameters.sh +++ b/tests/queries/0_stateless/01550_query_identifier_parameters.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --param_tbl 'numbers' --query 'select * from system.{tbl:Identifier} limit 1' diff --git a/tests/queries/0_stateless/01550_type_map_formats_input.sh b/tests/queries/0_stateless/01550_type_map_formats_input.sh index 0e167d3b0e9..75bf03a7437 100755 --- a/tests/queries/0_stateless/01550_type_map_formats_input.sh +++ b/tests/queries/0_stateless/01550_type_map_formats_input.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS map_formats_input" diff --git a/tests/queries/0_stateless/01554_row_number_after_cannot_read_all_data.sh b/tests/queries/0_stateless/01554_row_number_after_cannot_read_all_data.sh index a29b44d2f16..a52c5d811b8 100755 --- a/tests/queries/0_stateless/01554_row_number_after_cannot_read_all_data.sh +++ b/tests/queries/0_stateless/01554_row_number_after_cannot_read_all_data.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh echo -n -e '\x01\x00\x00\x00\x05Hello\x80' | ${CLICKHOUSE_LOCAL} --structure 'x UInt32, s String' --query "SELECT * FROM table" --input-format RowBinary 2>&1 | grep -oF '(at row 2)' diff --git a/tests/queries/0_stateless/01556_explain_select_with_union_query.reference b/tests/queries/0_stateless/01556_explain_select_with_union_query.reference index 442754ec344..e4aac5bda16 100644 --- a/tests/queries/0_stateless/01556_explain_select_with_union_query.reference +++ b/tests/queries/0_stateless/01556_explain_select_with_union_query.reference @@ -1,252 +1,252 @@ Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage 
(SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after 
reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits 
(Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection 
+ Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + 
Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + 
Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Distinct Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression 
(Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) Union - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) - Expression (Projection + Before ORDER BY and SELECT) + Expression (Projection + Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (SystemOne) diff --git a/tests/queries/0_stateless/01556_explain_select_with_union_query.sql b/tests/queries/0_stateless/01556_explain_select_with_union_query.sql index 16271113b5f..7abc8a36027 100644 --- a/tests/queries/0_stateless/01556_explain_select_with_union_query.sql +++ b/tests/queries/0_stateless/01556_explain_select_with_union_query.sql @@ -1,3 +1,5 @@ +SET union_default_mode = 'DISTINCT'; + EXPLAIN SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1; EXPLAIN (SELECT 1 UNION ALL SELECT 1) UNION ALL SELECT 1; EXPLAIN SELECT 1 UNION (SELECT 1 UNION ALL SELECT 1); diff --git a/tests/queries/0_stateless/01558_ttest_scipy.sh b/tests/queries/0_stateless/01558_ttest_scipy.sh index bd203478586..fea368f8181 100755 --- a/tests/queries/0_stateless/01558_ttest_scipy.sh +++ 
b/tests/queries/0_stateless/01558_ttest_scipy.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/01559_misplaced_codec_diagnostics.sh b/tests/queries/0_stateless/01559_misplaced_codec_diagnostics.sh index 9904b6388d6..8a3242c7036 100755 --- a/tests/queries/0_stateless/01559_misplaced_codec_diagnostics.sh +++ b/tests/queries/0_stateless/01559_misplaced_codec_diagnostics.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --query "CREATE TABLE t (c CODEC(NONE)) ENGINE = Memory" 2>&1 | grep -oF 'Unknown data type family: CODEC' | uniq diff --git a/tests/queries/0_stateless/01561_clickhouse_client_stage.sh b/tests/queries/0_stateless/01561_clickhouse_client_stage.sh index afe3703f4f3..a01bc7f5065 100755 --- a/tests/queries/0_stateless/01561_clickhouse_client_stage.sh +++ b/tests/queries/0_stateless/01561_clickhouse_client_stage.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh function execute_query() diff --git a/tests/queries/0_stateless/01561_mann_whitney_scipy.sh b/tests/queries/0_stateless/01561_mann_whitney_scipy.sh index e4e9152a97d..a04b630e2f3 100755 --- a/tests/queries/0_stateless/01561_mann_whitney_scipy.sh +++ b/tests/queries/0_stateless/01561_mann_whitney_scipy.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # We should have correct env vars from shell_config.sh to run this test diff --git a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference index 203efdab299..feca2cae5ea 100644 --- a/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference +++ b/tests/queries/0_stateless/01562_optimize_monotonous_functions_in_order_by.reference @@ -9,7 +9,7 @@ Expression (Projection) MergingSorted (Merge sorted streams for ORDER BY) MergeSorting (Merge sorted blocks for ORDER BY) PartialSorting (Sort each block for ORDER BY) - Expression (Before ORDER BY and SELECT) + Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (MergeTree) SELECT @@ -21,7 +21,7 @@ LIMIT 10 Expression (Projection) Limit (preliminary LIMIT) FinishSorting - Expression (Before ORDER BY and SELECT) + Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (MergeTree with order) SELECT @@ -35,7 +35,7 @@ LIMIT 10 Expression (Projection) Limit (preliminary LIMIT) FinishSorting - Expression (Before ORDER BY and SELECT) + Expression (Before ORDER BY) SettingQuotaAndLimits (Set limits and quota after reading from storage) ReadFromStorage (MergeTree with order) SELECT diff --git a/tests/queries/0_stateless/01563_distributed_query_finish.sh b/tests/queries/0_stateless/01563_distributed_query_finish.sh index 16e4ed8ebd1..a9e6a5b1fce 100755 --- a/tests/queries/0_stateless/01563_distributed_query_finish.sh +++ b/tests/queries/0_stateless/01563_distributed_query_finish.sh @@ -4,6 +4,7 @@ # (NETWORK_ERROR will be in case of connection reset) CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -nm < 0; + +-- should work in ORDER BY + +1 +2 +3 select number, max(number) over (partition by intDiv(number, 3) order by number desc) m from numbers(10) order by m desc, number; --- this one doesn't work yet -- looks like the column names clash, and the --- window count() is overwritten with aggregate count() --- select number, count(), count() over (partition by intDiv(number, 3)) from numbers(10) group by number order by count() desc; - --- different windows --- an explain test would also be helpful, but it's too immature now and I don't --- want to change reference all the time +-- also works in ORDER BY if you wrap it in a subquery 9 9 6 8 @@ -101,6 +102,37 @@ select number, max(number) over (partition by intDiv(number, 3) order by number 0 2 1 2 2 2 +select * from (select count(*) over () c from numbers(3)) order by c; + +-- Example with window function only in ORDER BY. Here we make a rank of all +-- numbers sorted descending, and then sort by this rank descending, and must get +-- the ascending order. + +1 +2 +3 +select * from (select * from numbers(5) order by rand()) order by count() over (order by number desc) desc; + +-- Aggregate functions as window function arguments. This query is semantically +-- the same as the above one, only we replace `number` with +-- `any(number) group by number` and so on. 
+ +0 +1 +2 +3 +4 +select * from (select * from numbers(5) order by rand()) group by number order by sum(any(number + 1)) over (order by min(number) desc) desc; + +-- different windows +-- an explain test would also be helpful, but it's too immature now and I don't +-- want to change reference all the time + +0 +1 +2 +3 +4 select number, max(number) over (partition by intDiv(number, 3) order by number desc), count(number) over (partition by intDiv(number, 5) order by number) as m from numbers(31) order by number settings max_block_size = 2; -- two functions over the same window @@ -140,6 +172,8 @@ select number, max(number) over (partition by intDiv(number, 3) order by number 30 30 1 select number, max(number) over (partition by intDiv(number, 3) order by number desc), count(number) over (partition by intDiv(number, 3) order by number desc) as m from numbers(7) order by number settings max_block_size = 2; +-- check that we can work with constant columns + 0 2 3 1 2 2 2 2 1 @@ -147,3 +181,39 @@ select number, max(number) over (partition by intDiv(number, 3) order by number 4 5 2 5 5 1 6 6 1 +select median(x) over (partition by x) from (select 1 x); + +-- an empty window definition is valid as well + +1 +select groupArray(number) over () from numbers(3); + +-- This one tests we properly process the window function arguments. +-- Seen errors like 'column `1` not found' from count(1). + +[0] +[0,1] +[0,1,2] +select count(1) over (), max(number + 1) over () from numbers(3); + +-- Should work in DISTINCT + +1 3 +select distinct sum(0) over () from numbers(2); + +0 +select distinct any(number) over () from numbers(2); + +-- Various kinds of aliases are properly substituted into various parts of window +-- function definition. 
+ +0 +with number + 1 as x select intDiv(number, 3) as y, sum(x + y) over (partition by y order by x) from numbers(7); + +0 1 +0 3 +0 6 +1 5 +1 11 +1 18 +2 9 diff --git a/tests/queries/0_stateless/01591_window_functions.sql b/tests/queries/0_stateless/01591_window_functions.sql index a28d435d3f8..082a6652a65 100644 --- a/tests/queries/0_stateless/01591_window_functions.sql +++ b/tests/queries/0_stateless/01591_window_functions.sql @@ -24,12 +24,24 @@ select number, quantileExact(number) over (partition by intDiv(number, 3)) q fro -- last stage of select, after all other functions. select q * 10, quantileExact(number) over (partition by intDiv(number, 3)) q from numbers(10); -- { serverError 47 } --- should work in ORDER BY though +-- must work in WHERE if you wrap it in a subquery +select * from (select count(*) over () c from numbers(3)) where c > 0; + +-- should work in ORDER BY select number, max(number) over (partition by intDiv(number, 3) order by number desc) m from numbers(10) order by m desc, number; --- this one doesn't work yet -- looks like the column names clash, and the --- window count() is overwritten with aggregate count() --- select number, count(), count() over (partition by intDiv(number, 3)) from numbers(10) group by number order by count() desc; +-- also works in ORDER BY if you wrap it in a subquery +select * from (select count(*) over () c from numbers(3)) order by c; + +-- Example with window function only in ORDER BY. Here we make a rank of all +-- numbers sorted descending, and then sort by this rank descending, and must get +-- the ascending order. +select * from (select * from numbers(5) order by rand()) order by count() over (order by number desc) desc; + +-- Aggregate functions as window function arguments. This query is semantically +-- the same as the above one, only we replace `number` with +-- `any(number) group by number` and so on. 
+select * from (select * from numbers(5) order by rand()) group by number order by sum(any(number + 1)) over (order by min(number) desc) desc; -- different windows -- an explain test would also be helpful, but it's too immature now and I don't @@ -40,3 +52,21 @@ select number, max(number) over (partition by intDiv(number, 3) order by number -- an explain test would also be helpful, but it's too immature now and I don't -- want to change reference all the time select number, max(number) over (partition by intDiv(number, 3) order by number desc), count(number) over (partition by intDiv(number, 3) order by number desc) as m from numbers(7) order by number settings max_block_size = 2; + +-- check that we can work with constant columns +select median(x) over (partition by x) from (select 1 x); + +-- an empty window definition is valid as well +select groupArray(number) over () from numbers(3); + +-- This one tests we properly process the window function arguments. +-- Seen errors like 'column `1` not found' from count(1). +select count(1) over (), max(number + 1) over () from numbers(3); + +-- Should work in DISTINCT +select distinct sum(0) over () from numbers(2); +select distinct any(number) over () from numbers(2); + +-- Various kinds of aliases are properly substituted into various parts of window +-- function definition. +with number + 1 as x select intDiv(number, 3) as y, sum(x + y) over (partition by y order by x) from numbers(7); diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh index a521d90b768..7f111538a06 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_mutate_kill" diff --git a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh index f5f751d6408..60e2adb4204 100755 --- a/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh +++ b/tests/queries/0_stateless/01593_concurrent_alter_mutations_kill_many_replicas.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh REPLICAS=5 diff --git a/tests/queries/0_stateless/01594_too_low_memory_limits.sh b/tests/queries/0_stateless/01594_too_low_memory_limits.sh index d0890f716cd..0d4dbbcabe1 100755 --- a/tests/queries/0_stateless/01594_too_low_memory_limits.sh +++ b/tests/queries/0_stateless/01594_too_low_memory_limits.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh # it is not mandatory to use existing table since it fails earlier, hence just a placeholder. diff --git a/tests/queries/0_stateless/01597_columns_list_ignored.sh b/tests/queries/0_stateless/01597_columns_list_ignored.sh index 3098eb5161a..9ab0f1303c3 100755 --- a/tests/queries/0_stateless/01597_columns_list_ignored.sh +++ b/tests/queries/0_stateless/01597_columns_list_ignored.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ${CLICKHOUSE_LOCAL} --query "describe table file('', TSV, 'a int, b.c int')" 2>&1 | grep -F -c 'Syntax error' diff --git a/tests/queries/0_stateless/01599_mutation_query_params.sh b/tests/queries/0_stateless/01599_mutation_query_params.sh index 6bc12b5409e..52b0131a9c2 100755 --- a/tests/queries/0_stateless/01599_mutation_query_params.sh +++ b/tests/queries/0_stateless/01599_mutation_query_params.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01600_benchmark_query.sh b/tests/queries/0_stateless/01600_benchmark_query.sh index 6b40b4464c7..a563c87a10f 100755 --- a/tests/queries/0_stateless/01600_benchmark_query.sh +++ b/tests/queries/0_stateless/01600_benchmark_query.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh $CLICKHOUSE_BENCHMARK --iterations 10 --query "SELECT 1" 1>/dev/null 2>"$CLICKHOUSE_TMP"/err diff --git a/tests/queries/0_stateless/01600_detach_permanently.sh b/tests/queries/0_stateless/01600_detach_permanently.sh index e897f80a33f..087545ec378 100755 --- a/tests/queries/0_stateless/01600_detach_permanently.sh +++ b/tests/queries/0_stateless/01600_detach_permanently.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh ## tests with real clickhouse restart would be a bit to heavy, @@ -13,7 +14,7 @@ mkdir -p "${WORKING_FOLDER_01600}" clickhouse_local() { local query="$1" shift - ${CLICKHOUSE_LOCAL} --query "$query" $@ -- --path="${WORKING_FOLDER_01600}" + ${CLICKHOUSE_LOCAL} --query "$query" "$@" -- --path="${WORKING_FOLDER_01600}" } test_detach_attach_sequence() { diff --git a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh index 2bc0a662cd1..039a68a76f3 100755 --- a/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh +++ b/tests/queries/0_stateless/01600_log_queries_with_extensive_info.sh @@ -2,6 +2,7 @@ set -ue CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} -q "drop database if exists test_log_queries" "--query_id=01600_log_queries_with_extensive_info_000" diff --git a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh index 33f5434dc6c..323dd88efab 100755 --- a/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh +++ b/tests/queries/0_stateless/01600_quota_by_forwarded_ip.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh diff --git a/tests/queries/0_stateless/01601_proxy_protocol.sh b/tests/queries/0_stateless/01601_proxy_protocol.sh index 8a431ba6dae..e8d1a7c45b2 100755 --- a/tests/queries/0_stateless/01601_proxy_protocol.sh +++ b/tests/queries/0_stateless/01601_proxy_protocol.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh printf "PROXY TCP4 255.255.255.255 255.255.255.255 65535 65535\r\n\0\21ClickHouse client\24\r\253\251\3\0\7default\0\4\1\0\1\0\0\t0.0.0.0:0\1\tmilovidov\21milovidov-desktop\vClickHouse \24\r\253\251\3\0\1\0\0\0\2\1\25SELECT 'Hello, world'\2\0\247\203\254l\325\\z|\265\254F\275\333\206\342\24\202\24\0\0\0\n\0\0\0\240\1\0\2\377\377\377\377\0\0\0" | nc "${CLICKHOUSE_HOST}" "${CLICKHOUSE_PORT_TCP_WITH_PROXY}" | head -c150 | grep --text -o -F 'Hello, world' diff --git a/tests/queries/0_stateless/01601_temporary_table_session_scope.sh b/tests/queries/0_stateless/01601_temporary_table_session_scope.sh index fbc45aec216..f6eeb50941f 100755 --- a/tests/queries/0_stateless/01601_temporary_table_session_scope.sh +++ b/tests/queries/0_stateless/01601_temporary_table_session_scope.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&query=DROP+TEMPORARY+TABLE+IF+EXISTS+tmptable&session_id=session_1601a" diff --git a/tests/queries/0_stateless/01602_max_distributed_connections.sh b/tests/queries/0_stateless/01602_max_distributed_connections.sh index 8c19b6f5bb7..93c6071c091 100755 --- a/tests/queries/0_stateless/01602_max_distributed_connections.sh +++ b/tests/queries/0_stateless/01602_max_distributed_connections.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh common_opts=( diff --git a/tests/queries/0_stateless/01606_git_import.sh b/tests/queries/0_stateless/01606_git_import.sh index 66b87d86653..16a0b92abe7 100755 --- a/tests/queries/0_stateless/01606_git_import.sh +++ b/tests/queries/0_stateless/01606_git_import.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh # Clone some not too large repository and create a database from it. diff --git a/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh b/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh index 2f150e934d0..946be7fb4af 100755 --- a/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh +++ b/tests/queries/0_stateless/01607_arrays_as_nested_csv.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh ${CLICKHOUSE_CLIENT} --multiquery --query " diff --git a/tests/queries/0_stateless/01614_with_fill_with_limit.reference b/tests/queries/0_stateless/01614_with_fill_with_limit.reference new file mode 100644 index 00000000000..451e076552e --- /dev/null +++ b/tests/queries/0_stateless/01614_with_fill_with_limit.reference @@ -0,0 +1,4 @@ +1 original +2 +1 original +2 diff --git a/tests/queries/0_stateless/01614_with_fill_with_limit.sql b/tests/queries/0_stateless/01614_with_fill_with_limit.sql new file mode 100644 index 00000000000..119117af287 --- /dev/null +++ b/tests/queries/0_stateless/01614_with_fill_with_limit.sql @@ -0,0 +1,15 @@ +SELECT + toFloat32(number % 10) AS n, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY n ASC WITH FILL STEP 1 +LIMIT 2; + +SELECT + toFloat32(number % 10) AS n, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY n ASC WITH FILL STEP 1 +LIMIT 2 WITH TIES; diff --git a/tests/queries/0_stateless/01621_clickhouse_compressor.reference b/tests/queries/0_stateless/01621_clickhouse_compressor.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/01621_clickhouse_compressor.sh b/tests/queries/0_stateless/01621_clickhouse_compressor.sh new file mode 100755 index 00000000000..e00270e5db9 --- /dev/null +++ b/tests/queries/0_stateless/01621_clickhouse_compressor.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +CURDIR=$(cd 
"$(dirname "${BASH_SOURCE[0]}")" && pwd) +. "$CURDIR"/../shell_config.sh + +set -e + +TEMP_DIR="$(mktemp -d /tmp/clickhouse.test..XXXXXX)" +cd "${TEMP_DIR:?}" + +function cleanup() +{ + rm -fr "${TEMP_DIR:?}" +} +trap cleanup EXIT + +# This is random garbage, so compression ratio will be very low. +tr -cd 'a-z0-9' < /dev/urandom | head -c1M > input + +# stdin/stdout streams +$CLICKHOUSE_COMPRESSOR < input > output +diff -q <($CLICKHOUSE_COMPRESSOR --decompress < output) input + +# positional arguments, and that fact that input/output will be overwritten +$CLICKHOUSE_COMPRESSOR input output +diff -q <($CLICKHOUSE_COMPRESSOR --decompress output) input + +# --offset-in-decompressed-block +diff -q <($CLICKHOUSE_COMPRESSOR --decompress --offset-in-decompressed-block 10 output) <(tail -c+$((10+1)) input) + +# TODO: --offset-in-compressed-file using some .bin file (via clickhouse-local + check-marks) diff --git a/tests/queries/1_stateful/00090_thread_pool_deadlock.sh b/tests/queries/1_stateful/00090_thread_pool_deadlock.sh index 63ebcd39a99..e94243bfea2 100755 --- a/tests/queries/1_stateful/00090_thread_pool_deadlock.sh +++ b/tests/queries/1_stateful/00090_thread_pool_deadlock.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh echo '1'; diff --git a/tests/queries/1_stateful/00092_obfuscator.sh b/tests/queries/1_stateful/00092_obfuscator.sh index f13fcc341be..85f476c6ae5 100755 --- a/tests/queries/1_stateful/00092_obfuscator.sh +++ b/tests/queries/1_stateful/00092_obfuscator.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT --max_threads 1 --query="SELECT URL, Title, SearchPhrase FROM test.hits LIMIT 1000" > "${CLICKHOUSE_TMP}"/data.tsv diff --git a/tests/queries/shell_config.sh b/tests/queries/shell_config.sh index 5e9c78d26d9..0ca2cee3c77 100644 --- a/tests/queries/shell_config.sh +++ b/tests/queries/shell_config.sh @@ -22,6 +22,7 @@ export CLICKHOUSE_CLIENT=${CLICKHOUSE_CLIENT:="$CLICKHOUSE_CLIENT_BINARY ${CLICK [ -x "${CLICKHOUSE_BINARY}" ] && CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY} local"} export CLICKHOUSE_LOCAL=${CLICKHOUSE_LOCAL:="${CLICKHOUSE_BINARY}-local"} export CLICKHOUSE_OBFUSCATOR=${CLICKHOUSE_OBFUSCATOR:="${CLICKHOUSE_BINARY}-obfuscator"} +export CLICKHOUSE_COMPRESSOR=${CLICKHOUSE_COMPRESSOR:="${CLICKHOUSE_BINARY}-compressor"} export CLICKHOUSE_BENCHMARK=${CLICKHOUSE_BENCHMARK:="${CLICKHOUSE_BINARY}-benchmark ${CLICKHOUSE_BENCHMARK_OPT0:-}"} export CLICKHOUSE_GIT_IMPORT=${CLICKHOUSE_GIT_IMPORT="${CLICKHOUSE_BINARY}-git-import"} diff --git a/tests/testflows/helpers/cluster.py b/tests/testflows/helpers/cluster.py index d173547a916..3be79132ec3 100755 --- a/tests/testflows/helpers/cluster.py +++ b/tests/testflows/helpers/cluster.py @@ -37,6 +37,23 @@ class Node(object): self.cluster.command(None, f'{self.cluster.docker_compose} restart {self.name}', timeout=timeout) + def start(self, timeout=300, safe=True): + """Start node. + """ + self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout) + + + def stop(self, timeout=300, safe=True): + """Stop node. 
+ """ + with self.cluster.lock: + for key in list(self.cluster._bash.keys()): + if key.endswith(f"-{self.name}"): + shell = self.cluster._bash.pop(key) + shell.__exit__(None, None, None) + + self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout) + def command(self, *args, **kwargs): return self.cluster.command(self.name, *args, **kwargs) diff --git a/tests/testflows/ldap/authentication/requirements/requirements.md b/tests/testflows/ldap/authentication/requirements/requirements.md index 1c65a29fef4..27ce8c921a0 100644 --- a/tests/testflows/ldap/authentication/requirements/requirements.md +++ b/tests/testflows/ldap/authentication/requirements/requirements.md @@ -1,4 +1,5 @@ # SRS-007 ClickHouse Authentication of Users via LDAP +# Software Requirements Specification ## Table of Contents @@ -57,22 +58,28 @@ * 4.2.25 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir](#rqsrs-007ldapconfigurationservertlscacertdir) * 4.2.26 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile](#rqsrs-007ldapconfigurationservertlscacertfile) * 4.2.27 [RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite](#rqsrs-007ldapconfigurationservertlsciphersuite) - * 4.2.28 [RQ.SRS-007.LDAP.Configuration.Server.Syntax](#rqsrs-007ldapconfigurationserversyntax) - * 4.2.29 [RQ.SRS-007.LDAP.Configuration.User.RBAC](#rqsrs-007ldapconfigurationuserrbac) - * 4.2.30 [RQ.SRS-007.LDAP.Configuration.User.Syntax](#rqsrs-007ldapconfigurationusersyntax) - * 4.2.31 [RQ.SRS-007.LDAP.Configuration.User.Name.Empty](#rqsrs-007ldapconfigurationusernameempty) - * 4.2.32 [RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP](#rqsrs-007ldapconfigurationuserbothpasswordandldap) - * 4.2.33 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined](#rqsrs-007ldapconfigurationuserldapinvalidservernamenotdefined) - * 4.2.34 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty](#rqsrs-007ldapconfigurationuserldapinvalidservernameempty) - * 4.2.35 
[RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer](#rqsrs-007ldapconfigurationuseronlyoneserver) - * 4.2.36 [RQ.SRS-007.LDAP.Configuration.User.Name.Long](#rqsrs-007ldapconfigurationusernamelong) - * 4.2.37 [RQ.SRS-007.LDAP.Configuration.User.Name.UTF8](#rqsrs-007ldapconfigurationusernameutf8) - * 4.2.38 [RQ.SRS-007.LDAP.Authentication.Username.Empty](#rqsrs-007ldapauthenticationusernameempty) - * 4.2.39 [RQ.SRS-007.LDAP.Authentication.Username.Long](#rqsrs-007ldapauthenticationusernamelong) - * 4.2.40 [RQ.SRS-007.LDAP.Authentication.Username.UTF8](#rqsrs-007ldapauthenticationusernameutf8) - * 4.2.41 [RQ.SRS-007.LDAP.Authentication.Password.Empty](#rqsrs-007ldapauthenticationpasswordempty) - * 4.2.42 [RQ.SRS-007.LDAP.Authentication.Password.Long](#rqsrs-007ldapauthenticationpasswordlong) - * 4.2.43 [RQ.SRS-007.LDAP.Authentication.Password.UTF8](#rqsrs-007ldapauthenticationpasswordutf8) + * 4.2.28 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown](#rqsrs-007ldapconfigurationserververificationcooldown) + * 4.2.29 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default](#rqsrs-007ldapconfigurationserververificationcooldowndefault) + * 4.2.30 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid](#rqsrs-007ldapconfigurationserververificationcooldowninvalid) + * 4.2.31 [RQ.SRS-007.LDAP.Configuration.Server.Syntax](#rqsrs-007ldapconfigurationserversyntax) + * 4.2.32 [RQ.SRS-007.LDAP.Configuration.User.RBAC](#rqsrs-007ldapconfigurationuserrbac) + * 4.2.33 [RQ.SRS-007.LDAP.Configuration.User.Syntax](#rqsrs-007ldapconfigurationusersyntax) + * 4.2.34 [RQ.SRS-007.LDAP.Configuration.User.Name.Empty](#rqsrs-007ldapconfigurationusernameempty) + * 4.2.35 [RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP](#rqsrs-007ldapconfigurationuserbothpasswordandldap) + * 4.2.36 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined](#rqsrs-007ldapconfigurationuserldapinvalidservernamenotdefined) + * 4.2.37 
[RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty](#rqsrs-007ldapconfigurationuserldapinvalidservernameempty) + * 4.2.38 [RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer](#rqsrs-007ldapconfigurationuseronlyoneserver) + * 4.2.39 [RQ.SRS-007.LDAP.Configuration.User.Name.Long](#rqsrs-007ldapconfigurationusernamelong) + * 4.2.40 [RQ.SRS-007.LDAP.Configuration.User.Name.UTF8](#rqsrs-007ldapconfigurationusernameutf8) + * 4.2.41 [RQ.SRS-007.LDAP.Authentication.Username.Empty](#rqsrs-007ldapauthenticationusernameempty) + * 4.2.42 [RQ.SRS-007.LDAP.Authentication.Username.Long](#rqsrs-007ldapauthenticationusernamelong) + * 4.2.43 [RQ.SRS-007.LDAP.Authentication.Username.UTF8](#rqsrs-007ldapauthenticationusernameutf8) + * 4.2.44 [RQ.SRS-007.LDAP.Authentication.Password.Empty](#rqsrs-007ldapauthenticationpasswordempty) + * 4.2.45 [RQ.SRS-007.LDAP.Authentication.Password.Long](#rqsrs-007ldapauthenticationpasswordlong) + * 4.2.46 [RQ.SRS-007.LDAP.Authentication.Password.UTF8](#rqsrs-007ldapauthenticationpasswordutf8) + * 4.2.47 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance](#rqsrs-007ldapauthenticationverificationcooldownperformance) + * 4.2.48 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters](#rqsrs-007ldapauthenticationverificationcooldownresetchangeincoreserverparameters) + * 4.2.49 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword](#rqsrs-007ldapauthenticationverificationcooldownresetinvalidpassword) * 5 [References](#references) ## Revision History @@ -393,9 +400,44 @@ For example, The available suites SHALL depend on the [OpenSSL] library version and variant used to build [ClickHouse] and therefore might change. 
-#### RQ.SRS-007.LDAP.Configuration.Server.Syntax +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown version: 1.0 +[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section +that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed +to be successfully authenticated for all consecutive requests without contacting the [LDAP] server. +After period of time since the last successful attempt expires then on the authentication attempt +SHALL result in contacting the [LDAP] server to verify the username and password. + +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default +version: 1.0 + +[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section +SHALL have a default value of `0` that disables caching and forces contacting +the [LDAP] server for each authentication request. + +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid +version: 1.0 + +[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer. + +For example: + +* negative integer +* string +* empty value +* extremely large positive value (overflow) +* extremely large negative value (overflow) + +The error SHALL appear in the log and SHALL be similar to the following: + +```bash + Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value* +``` + +#### RQ.SRS-007.LDAP.Configuration.Server.Syntax +version: 2.0 + [ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml` configuration file or of any configuration file inside the `config.d` directory. @@ -406,6 +448,7 @@ configuration file or of any configuration file inside the `config.d` directory. 
636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -521,6 +564,33 @@ version: 1.0 [ClickHouse] SHALL support [UTF-8] characters in passwords used to authenticate users using an [LDAP] server. +#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance +version: 1.0 + +[ClickHouse] SHALL provide better login performance of [LDAP] authenticated users +when `verification_cooldown` parameter is set to a positive value when comparing +to the the case when `verification_cooldown` is turned off either for a single user or multiple users +making a large number of repeated requests. + +#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters +version: 1.0 + +[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values +change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user +to result in contacting the [LDAP] server to verify user's username and password. + +#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword +version: 1.0 + +[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +for the user if the password provided in the current authentication attempt does not match +the valid password provided during the first successful authentication request that was cached +for this exact user. The reset SHALL cause the next authentication attempt for this user +to result in contacting the [LDAP] server to verify user's username and password. 
+ ## References * **ClickHouse:** https://clickhouse.tech diff --git a/tests/testflows/ldap/authentication/requirements/requirements.py b/tests/testflows/ldap/authentication/requirements/requirements.py index 4e955bf801b..25b943d18c2 100644 --- a/tests/testflows/ldap/authentication/requirements/requirements.py +++ b/tests/testflows/ldap/authentication/requirements/requirements.py @@ -790,9 +790,74 @@ RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite = Requirement( level=3, num='4.2.27') +RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown = Requirement( + name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n' + 'to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n' + 'After period of time since the last successful attempt expires then on the authentication attempt\n' + 'SHALL result in contacting the [LDAP] server to verify the username and password. 
\n' + '\n' + ), + link=None, + level=3, + num='4.2.28') + +RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default = Requirement( + name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'SHALL have a default value of `0` that disables caching and forces contacting\n' + 'the [LDAP] server for each authentication request.\n' + '\n' + ), + link=None, + level=3, + num='4.2.29') + +RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid = Requirement( + name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n' + '\n' + 'For example:\n' + '\n' + '* negative integer\n' + '* string\n' + '* empty value\n' + '* extremely large positive value (overflow)\n' + '* extremely large negative value (overflow)\n' + '\n' + 'The error SHALL appear in the log and SHALL be similar to the following:\n' + '\n' + '```bash\n' + ' Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. 
Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n' + '```\n' + '\n' + ), + link=None, + level=3, + num='4.2.30') + RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', - version='1.0', + version='2.0', priority=None, group=None, type=None, @@ -808,6 +873,7 @@ RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( ' 636\n' ' cn=\n' ' , ou=users, dc=example, dc=com\n' + ' 0\n' ' yes\n' ' tls1.2\n' ' demand\n' @@ -823,7 +889,7 @@ RQ_SRS_007_LDAP_Configuration_Server_Syntax = Requirement( ), link=None, level=3, - num='4.2.28') + num='4.2.31') RQ_SRS_007_LDAP_Configuration_User_RBAC = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.RBAC', @@ -843,7 +909,7 @@ RQ_SRS_007_LDAP_Configuration_User_RBAC = Requirement( ), link=None, level=3, - num='4.2.29') + num='4.2.32') RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Syntax', @@ -871,7 +937,7 @@ RQ_SRS_007_LDAP_Configuration_User_Syntax = Requirement( ), link=None, level=3, - num='4.2.30') + num='4.2.33') RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', @@ -886,7 +952,7 @@ RQ_SRS_007_LDAP_Configuration_User_Name_Empty = Requirement( ), link=None, level=3, - num='4.2.31') + num='4.2.34') RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', @@ -902,7 +968,7 @@ RQ_SRS_007_LDAP_Configuration_User_BothPasswordAndLDAP = Requirement( ), link=None, level=3, - num='4.2.32') + num='4.2.35') RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', @@ -919,7 +985,7 @@ RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_NotDefined = Requireme ), link=None, level=3, - num='4.2.33') + num='4.2.36') 
RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', @@ -936,7 +1002,7 @@ RQ_SRS_007_LDAP_Configuration_User_LDAP_InvalidServerName_Empty = Requirement( ), link=None, level=3, - num='4.2.34') + num='4.2.37') RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', @@ -951,7 +1017,7 @@ RQ_SRS_007_LDAP_Configuration_User_OnlyOneServer = Requirement( ), link=None, level=3, - num='4.2.35') + num='4.2.38') RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', @@ -967,7 +1033,7 @@ RQ_SRS_007_LDAP_Configuration_User_Name_Long = Requirement( ), link=None, level=3, - num='4.2.36') + num='4.2.39') RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', @@ -982,7 +1048,7 @@ RQ_SRS_007_LDAP_Configuration_User_Name_UTF8 = Requirement( ), link=None, level=3, - num='4.2.37') + num='4.2.40') RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.Empty', @@ -997,7 +1063,7 @@ RQ_SRS_007_LDAP_Authentication_Username_Empty = Requirement( ), link=None, level=3, - num='4.2.38') + num='4.2.41') RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.Long', @@ -1012,7 +1078,7 @@ RQ_SRS_007_LDAP_Authentication_Username_Long = Requirement( ), link=None, level=3, - num='4.2.39') + num='4.2.42') RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', @@ -1027,7 +1093,7 @@ RQ_SRS_007_LDAP_Authentication_Username_UTF8 = Requirement( ), link=None, level=3, - num='4.2.40') + num='4.2.43') RQ_SRS_007_LDAP_Authentication_Password_Empty = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.Empty', @@ -1044,7 +1110,7 @@ RQ_SRS_007_LDAP_Authentication_Password_Empty 
= Requirement( ), link=None, level=3, - num='4.2.41') + num='4.2.44') RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.Long', @@ -1060,7 +1126,7 @@ RQ_SRS_007_LDAP_Authentication_Password_Long = Requirement( ), link=None, level=3, - num='4.2.42') + num='4.2.45') RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement( name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', @@ -1076,7 +1142,64 @@ RQ_SRS_007_LDAP_Authentication_Password_UTF8 = Requirement( ), link=None, level=3, - num='4.2.43') + num='4.2.46') + +RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance = Requirement( + name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL provide better login performance of [LDAP] authenticated users\n' + 'when `verification_cooldown` parameter is set to a positive value when comparing\n' + 'to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n' + 'making a large number of repeated requests.\n' + '\n' + ), + link=None, + level=3, + num='4.2.47') + +RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters = Requirement( + name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n' + '`verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n' + 'change in the configuration file. 
The reset SHALL cause any subsequent authentication attempts for any user\n' + "to result in contacting the [LDAP] server to verify user's username and password.\n" + '\n' + ), + link=None, + level=3, + num='4.2.48') + +RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword = Requirement( + name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n' + '`verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'for the user if the password provided in the current authentication attempt does not match\n' + 'the valid password provided during the first successful authentication request that was cached\n' + 'for this exact user. The reset SHALL cause the next authentication attempt for this user\n' + "to result in contacting the [LDAP] server to verify user's username and password.\n" + '\n' + ), + link=None, + level=3, + num='4.2.49') SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( name='SRS-007 ClickHouse Authentication of Users via LDAP', @@ -1150,22 +1273,28 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir', level=3, num='4.2.25'), Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile', level=3, num='4.2.26'), Heading(name='RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite', level=3, num='4.2.27'), - Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', level=3, num='4.2.28'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.RBAC', level=3, num='4.2.29'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Syntax', level=3, num='4.2.30'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', level=3, num='4.2.31'), - 
Heading(name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', level=3, num='4.2.32'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', level=3, num='4.2.33'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', level=3, num='4.2.34'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', level=3, num='4.2.35'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', level=3, num='4.2.36'), - Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', level=3, num='4.2.37'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Empty', level=3, num='4.2.38'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Long', level=3, num='4.2.39'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', level=3, num='4.2.40'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Empty', level=3, num='4.2.41'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Long', level=3, num='4.2.42'), - Heading(name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', level=3, num='4.2.43'), + Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown', level=3, num='4.2.28'), + Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default', level=3, num='4.2.29'), + Heading(name='RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid', level=3, num='4.2.30'), + Heading(name='RQ.SRS-007.LDAP.Configuration.Server.Syntax', level=3, num='4.2.31'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.RBAC', level=3, num='4.2.32'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.Syntax', level=3, num='4.2.33'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Empty', level=3, num='4.2.34'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP', level=3, num='4.2.35'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined', level=3, num='4.2.36'), + 
Heading(name='RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty', level=3, num='4.2.37'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer', level=3, num='4.2.38'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.Long', level=3, num='4.2.39'), + Heading(name='RQ.SRS-007.LDAP.Configuration.User.Name.UTF8', level=3, num='4.2.40'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Empty', level=3, num='4.2.41'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Username.Long', level=3, num='4.2.42'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Username.UTF8', level=3, num='4.2.43'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Empty', level=3, num='4.2.44'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Password.Long', level=3, num='4.2.45'), + Heading(name='RQ.SRS-007.LDAP.Authentication.Password.UTF8', level=3, num='4.2.46'), + Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance', level=3, num='4.2.47'), + Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', level=3, num='4.2.48'), + Heading(name='RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword', level=3, num='4.2.49'), Heading(name='References', level=1, num='5'), ), requirements=( @@ -1218,6 +1347,9 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( RQ_SRS_007_LDAP_Configuration_Server_TLSCACertDir, RQ_SRS_007_LDAP_Configuration_Server_TLSCACertFile, RQ_SRS_007_LDAP_Configuration_Server_TLSCipherSuite, + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown, + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default, + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid, RQ_SRS_007_LDAP_Configuration_Server_Syntax, RQ_SRS_007_LDAP_Configuration_User_RBAC, RQ_SRS_007_LDAP_Configuration_User_Syntax, @@ -1234,9 +1366,13 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( 
RQ_SRS_007_LDAP_Authentication_Password_Empty, RQ_SRS_007_LDAP_Authentication_Password_Long, RQ_SRS_007_LDAP_Authentication_Password_UTF8, + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance, + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters, + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword, ), content=''' # SRS-007 ClickHouse Authentication of Users via LDAP +# Software Requirements Specification ## Table of Contents @@ -1295,22 +1431,28 @@ SRS_007_ClickHouse_Authentication_of_Users_via_LDAP = Specification( * 4.2.25 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertDir](#rqsrs-007ldapconfigurationservertlscacertdir) * 4.2.26 [RQ.SRS-007.LDAP.Configuration.Server.TLSCACertFile](#rqsrs-007ldapconfigurationservertlscacertfile) * 4.2.27 [RQ.SRS-007.LDAP.Configuration.Server.TLSCipherSuite](#rqsrs-007ldapconfigurationservertlsciphersuite) - * 4.2.28 [RQ.SRS-007.LDAP.Configuration.Server.Syntax](#rqsrs-007ldapconfigurationserversyntax) - * 4.2.29 [RQ.SRS-007.LDAP.Configuration.User.RBAC](#rqsrs-007ldapconfigurationuserrbac) - * 4.2.30 [RQ.SRS-007.LDAP.Configuration.User.Syntax](#rqsrs-007ldapconfigurationusersyntax) - * 4.2.31 [RQ.SRS-007.LDAP.Configuration.User.Name.Empty](#rqsrs-007ldapconfigurationusernameempty) - * 4.2.32 [RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP](#rqsrs-007ldapconfigurationuserbothpasswordandldap) - * 4.2.33 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined](#rqsrs-007ldapconfigurationuserldapinvalidservernamenotdefined) - * 4.2.34 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty](#rqsrs-007ldapconfigurationuserldapinvalidservernameempty) - * 4.2.35 [RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer](#rqsrs-007ldapconfigurationuseronlyoneserver) - * 4.2.36 [RQ.SRS-007.LDAP.Configuration.User.Name.Long](#rqsrs-007ldapconfigurationusernamelong) - * 4.2.37 
[RQ.SRS-007.LDAP.Configuration.User.Name.UTF8](#rqsrs-007ldapconfigurationusernameutf8) - * 4.2.38 [RQ.SRS-007.LDAP.Authentication.Username.Empty](#rqsrs-007ldapauthenticationusernameempty) - * 4.2.39 [RQ.SRS-007.LDAP.Authentication.Username.Long](#rqsrs-007ldapauthenticationusernamelong) - * 4.2.40 [RQ.SRS-007.LDAP.Authentication.Username.UTF8](#rqsrs-007ldapauthenticationusernameutf8) - * 4.2.41 [RQ.SRS-007.LDAP.Authentication.Password.Empty](#rqsrs-007ldapauthenticationpasswordempty) - * 4.2.42 [RQ.SRS-007.LDAP.Authentication.Password.Long](#rqsrs-007ldapauthenticationpasswordlong) - * 4.2.43 [RQ.SRS-007.LDAP.Authentication.Password.UTF8](#rqsrs-007ldapauthenticationpasswordutf8) + * 4.2.28 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown](#rqsrs-007ldapconfigurationserververificationcooldown) + * 4.2.29 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default](#rqsrs-007ldapconfigurationserververificationcooldowndefault) + * 4.2.30 [RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid](#rqsrs-007ldapconfigurationserververificationcooldowninvalid) + * 4.2.31 [RQ.SRS-007.LDAP.Configuration.Server.Syntax](#rqsrs-007ldapconfigurationserversyntax) + * 4.2.32 [RQ.SRS-007.LDAP.Configuration.User.RBAC](#rqsrs-007ldapconfigurationuserrbac) + * 4.2.33 [RQ.SRS-007.LDAP.Configuration.User.Syntax](#rqsrs-007ldapconfigurationusersyntax) + * 4.2.34 [RQ.SRS-007.LDAP.Configuration.User.Name.Empty](#rqsrs-007ldapconfigurationusernameempty) + * 4.2.35 [RQ.SRS-007.LDAP.Configuration.User.BothPasswordAndLDAP](#rqsrs-007ldapconfigurationuserbothpasswordandldap) + * 4.2.36 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.NotDefined](#rqsrs-007ldapconfigurationuserldapinvalidservernamenotdefined) + * 4.2.37 [RQ.SRS-007.LDAP.Configuration.User.LDAP.InvalidServerName.Empty](#rqsrs-007ldapconfigurationuserldapinvalidservernameempty) + * 4.2.38 [RQ.SRS-007.LDAP.Configuration.User.OnlyOneServer](#rqsrs-007ldapconfigurationuseronlyoneserver) + * 
4.2.39 [RQ.SRS-007.LDAP.Configuration.User.Name.Long](#rqsrs-007ldapconfigurationusernamelong) + * 4.2.40 [RQ.SRS-007.LDAP.Configuration.User.Name.UTF8](#rqsrs-007ldapconfigurationusernameutf8) + * 4.2.41 [RQ.SRS-007.LDAP.Authentication.Username.Empty](#rqsrs-007ldapauthenticationusernameempty) + * 4.2.42 [RQ.SRS-007.LDAP.Authentication.Username.Long](#rqsrs-007ldapauthenticationusernamelong) + * 4.2.43 [RQ.SRS-007.LDAP.Authentication.Username.UTF8](#rqsrs-007ldapauthenticationusernameutf8) + * 4.2.44 [RQ.SRS-007.LDAP.Authentication.Password.Empty](#rqsrs-007ldapauthenticationpasswordempty) + * 4.2.45 [RQ.SRS-007.LDAP.Authentication.Password.Long](#rqsrs-007ldapauthenticationpasswordlong) + * 4.2.46 [RQ.SRS-007.LDAP.Authentication.Password.UTF8](#rqsrs-007ldapauthenticationpasswordutf8) + * 4.2.47 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance](#rqsrs-007ldapauthenticationverificationcooldownperformance) + * 4.2.48 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters](#rqsrs-007ldapauthenticationverificationcooldownresetchangeincoreserverparameters) + * 4.2.49 [RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword](#rqsrs-007ldapauthenticationverificationcooldownresetinvalidpassword) * 5 [References](#references) ## Revision History @@ -1631,9 +1773,44 @@ For example, The available suites SHALL depend on the [OpenSSL] library version and variant used to build [ClickHouse] and therefore might change. -#### RQ.SRS-007.LDAP.Configuration.Server.Syntax +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown version: 1.0 +[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section +that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed +to be successfully authenticated for all consecutive requests without contacting the [LDAP] server. 
+After period of time since the last successful attempt expires then on the authentication attempt +SHALL result in contacting the [LDAP] server to verify the username and password. + +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Default +version: 1.0 + +[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section +SHALL have a default value of `0` that disables caching and forces contacting +the [LDAP] server for each authentication request. + +#### RQ.SRS-007.LDAP.Configuration.Server.VerificationCooldown.Invalid +version: 1.0 + +[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer. + +For example: + +* negative integer +* string +* empty value +* extremely large positive value (overflow) +* extremely large negative value (overflow) + +The error SHALL appear in the log and SHALL be similar to the following: + +```bash + Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value* +``` + +#### RQ.SRS-007.LDAP.Configuration.Server.Syntax +version: 2.0 + [ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml` configuration file or of any configuration file inside the `config.d` directory. @@ -1644,6 +1821,7 @@ configuration file or of any configuration file inside the `config.d` directory. 636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -1759,6 +1937,33 @@ version: 1.0 [ClickHouse] SHALL support [UTF-8] characters in passwords used to authenticate users using an [LDAP] server. 
+#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Performance
+version: 1.0
+
+[ClickHouse] SHALL provide better login performance of [LDAP] authenticated users
+when `verification_cooldown` parameter is set to a positive value when comparing
+to the case when `verification_cooldown` is turned off either for a single user or multiple users
+making a large number of repeated requests.
+
+#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters
+version: 1.0
+
+[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the
+`verification_cooldown` parameter in the [LDAP] server configuration section
+if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values
+change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user
+to result in contacting the [LDAP] server to verify user's username and password.
+
+#### RQ.SRS-007.LDAP.Authentication.VerificationCooldown.Reset.InvalidPassword
+version: 1.0
+
+[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the
+`verification_cooldown` parameter in the [LDAP] server configuration section
+for the user if the password provided in the current authentication attempt does not match
+the valid password provided during the first successful authentication request that was cached
+for this exact user. The reset SHALL cause the next authentication attempt for this user
+to result in contacting the [LDAP] server to verify user's username and password.
+ ## References * **ClickHouse:** https://clickhouse.tech diff --git a/tests/testflows/ldap/authentication/tests/authentications.py b/tests/testflows/ldap/authentication/tests/authentications.py index a64a37ed686..46bcae000b8 100644 --- a/tests/testflows/ldap/authentication/tests/authentications.py +++ b/tests/testflows/ldap/authentication/tests/authentications.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- import random +import time from multiprocessing.dummy import Pool from testflows.core import * @@ -27,6 +28,7 @@ servers = { @TestStep(When) @Name("I login as {username} and execute query") +@Args(format_name=True) def login_and_execute_query(self, username, password, exitcode=None, message=None, steps=True): """Execute query as some user. """ @@ -129,7 +131,7 @@ def login_after_user_is_deleted_from_ldap(self, server, rbac=False): user = add_user_to_ldap(**user) with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml", - restart=True, rbac=rbac): + restart=True, rbac=rbac): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I delete this user from LDAP"): @@ -200,7 +202,7 @@ def login_after_user_cn_changed_in_ldap(self, server, rbac=False): user = add_user_to_ldap(**user) with ldap_authenticated_users({"username": user["cn"], "server": server}, - config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac): + config_file=f"ldap_users_{getuid()}.xml", restart=True, rbac=rbac): login_and_execute_query(username=user["cn"], password=user["userpassword"]) with When("I change user password in LDAP"): @@ -474,6 +476,470 @@ def empty_username_and_empty_password(self, server=None, rbac=False): """ login_and_execute_query(username="", password="") +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Default("1.0") +) +def default_verification_cooldown_value(self, server, rbac=False, timeout=20): + """Check 
that the default value (0) for the verification cooldown parameter + disables caching and forces contacting the LDAP server for each + authentication request. + """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that uses the default verification_cooldown value (0)"): + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("when I try to login immediately with the old user password it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_cn_change(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set and the user cn is changed. 
+ """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + new_user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user cn in LDAP"): + new_user = change_user_cn_in_ldap(user, "testVCD2") + + with Then("when I try to login again with the old user cn it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if new_user is not None: + delete_user_from_ldap(new_user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_password_change(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set and the user password is changed. 
+ """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("when I try to login again with the old password it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set, even when the LDAP server is offline. 
+ """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml"): + + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + try: + with And("then I stop the ldap server"): + self.context.ldap_node.stop() + + with Then("when I try to login again with the server offline it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I start the ldap server back up"): + self.context.ldap_node.start() + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestOutline +def repeat_requests(self, server, iterations, vcd_value, rbac=False): + """Run repeated requests from some user to the LDAP server. 
+ """ + + user = None + + with Given(f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": vcd_value + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with And("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, config_file=f"ldap_users_{getuid()}.xml"): + with When(f"I login and execute some query {iterations} times"): + start_time = time.time() + r = self.context.node.command(f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done") + end_time = time.time() + + return end_time - start_time + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Performance("1.0") +) +def verification_cooldown_performance(self, server, rbac=False, iterations=5000): + """Check that login performance is better when the verification cooldown + parameter is set to a positive value when comparing to the case when + the verification cooldown parameter is turned off. 
+ """ + + vcd_time = 0 + no_vcd_time = 0 + + with Example(f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations"): + vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="600", rbac=rbac) + metric("login_with_vcd_value_600", units="seconds", value=vcd_time) + + with Example(f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations"): + no_vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="0", rbac=rbac) + metric("login_with_vcd_value_0", units="seconds", value=no_vcd_time) + + with Then("The performance with verification cooldown parameter set is better than the performance with no verification cooldown parameter."): + assert no_vcd_time > vcd_time, error() + + with And("Log the performance improvement as a percentage."): + metric("percentage_improvement", units="%", value=100*(no_vcd_time - vcd_time)/vcd_time) + +@TestOutline +def check_verification_cooldown_reset_on_core_server_parameter_change(self, server, + parameter_name, parameter_value, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after one of the core server + parameters is changed in the LDAP server configuration. 
+ """ + + config_d_dir="/etc/clickhouse-server/config.d" + config_file="ldap_servers.xml" + error_message = "DB::Exception: {user}: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + config=None + updated_config=None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + with And("LDAP authenticated user"): + users = [ + {"cn": f"testVCD_0", "userpassword": "testVCD_0"}, + {"cn": f"testVCD_1", "userpassword": "testVCD_1"} + ] + + with And("I create LDAP servers configuration file"): + config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + + with ldap_users(*users) as users: + with ldap_servers(servers, restart=True): + with ldap_authenticated_users(*[{"username": user["cn"], "server": server} for user in users]): + with When("I login and execute a query"): + for user in users: + with By(f"as user {user['cn']}"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + for user in users: + with By(f"for user {user['cn']}"): + change_user_password_in_ldap(user, "newpassword") + + with And(f"I change the server {parameter_name} core parameter", description=f"{parameter_value}"): + servers["openldap1"][parameter_name] = parameter_value + + with And("I create an updated the config file that has a different server host name"): + updated_config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + + with modify_config(updated_config, restart=False): + with Then("when I try to log in it should fail as cache should have been reset"): + for user in users: + with By(f"as user 
{user['cn']}"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message.format(user=user["cn"])) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_host_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server host name + is changed in the LDAP server configuration. + """ + + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="host", parameter_value="openldap2", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_port_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server port is changed in the + LDAP server configuration. + """ + + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="port", parameter_value="9006", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server auth_dn_prefix + is changed in the LDAP server configuration. 
+ """ + + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="auth_dn_prefix", parameter_value="cxx=", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server auth_dn_suffix + is changed in the LDAP server configuration. + """ + + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="auth_dn_suffix", + parameter_value=",ou=company,dc=users,dc=com", rbac=rbac) + + +@TestScenario +@Name("verification cooldown reset when invalid password is provided") +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_007_LDAP_Authentication_VerificationCooldown_Reset_InvalidPassword("1.0") +) +def scenario(self, server, rbac=False): + """Check that cached bind requests for the user are discarded when + the user provides invalid login credentials. 
+ """ + + user = None + error_exitcode = 4 + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with ldap_authenticated_users({"username": user["cn"], "server": server}, + config_file=f"ldap_users_{getuid()}.xml"): + + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("When I try to log in with the cached password it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("When I try to log in with an incorrect password it should fail"): + login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, + message=error_message) + + with And("When I try to log in with the cached password it should fail"): + login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, + message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestFeature +def verification_cooldown(self, rbac, servers=None, node="clickhouse1"): + """Check verification cooldown parameter functionality. 
+ """ + for scenario in loads(current_module(), Scenario, filter=has.tag("verification_cooldown")): + scenario(server="openldap1", rbac=rbac) + + @TestOutline(Feature) @Name("user authentications") @Requirements( @@ -493,5 +959,11 @@ def feature(self, rbac, servers=None, node="clickhouse1"): servers = globals()["servers"] with ldap_servers(servers): - for scenario in loads(current_module(), Scenario): + for scenario in loads(current_module(), Scenario, filter=~has.tag("verification_cooldown")): scenario(server="openldap1", rbac=rbac) + + Feature(test=verification_cooldown)(rbac=rbac, servers=servers, node=node) + + + + diff --git a/tests/testflows/ldap/authentication/tests/common.py b/tests/testflows/ldap/authentication/tests/common.py index ed8d46df92b..8efb389a23f 100644 --- a/tests/testflows/ldap/authentication/tests/common.py +++ b/tests/testflows/ldap/authentication/tests/common.py @@ -78,7 +78,7 @@ def restart(node=None, safe=False, timeout=60): f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration", timeout=timeout) -def add_config(config, timeout=60, restart=False): +def add_config(config, timeout=60, restart=False, modify=False): """Add dynamic configuration file to ClickHouse. 
:param node: node @@ -165,19 +165,20 @@ def add_config(config, timeout=60, restart=False): wait_for_config_to_be_loaded() yield finally: - with Finally(f"I remove {config.name}"): - with node.cluster.shell(node.name) as bash: - bash.expect(bash.prompt) - bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log") + if not modify: + with Finally(f"I remove {config.name}"): + with node.cluster.shell(node.name) as bash: + bash.expect(bash.prompt) + bash.send("tail -n 0 -f /var/log/clickhouse-server/clickhouse-server.log") - with By("removing the config file", description=config.path): - node.command(f"rm -rf {config.path}", exitcode=0) + with By("removing the config file", description=config.path): + node.command(f"rm -rf {config.path}", exitcode=0) - with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): - check_preprocessed_config_is_updated(after_removal=True) + with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"): + check_preprocessed_config_is_updated(after_removal=True) - with And("I wait for config to be reloaded"): - wait_for_config_to_be_loaded() + with And("I wait for config to be reloaded"): + wait_for_config_to_be_loaded() def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml"): """Create LDAP servers configuration content. @@ -201,12 +202,19 @@ def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-se return Config(content, path, name, uid, "config.xml") +@contextmanager +def modify_config(config, restart=False): + """Apply updated configuration file. + """ + return add_config(config, restart=restart, modify=True) + @contextmanager def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml", - timeout=60, restart=False): + timeout=60, restart=False, config=None): """Add LDAP servers configuration. 
""" - config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + if config is None: + config = create_ldap_servers_config_content(servers, config_d_dir, config_file) return add_config(config, restart=restart) def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml"): diff --git a/tests/testflows/ldap/authentication/tests/server_config.py b/tests/testflows/ldap/authentication/tests/server_config.py index f62fda0bbf7..38ec859226b 100644 --- a/tests/testflows/ldap/authentication/tests/server_config.py +++ b/tests/testflows/ldap/authentication/tests/server_config.py @@ -217,9 +217,39 @@ def auth_dn_value(self): login(servers, user) +@TestOutline(Scenario) +@Examples("invalid_value", [ + ("-1", Name("negative int")), + ("foo", Name("string")), + ("", Name("empty string")), + ("36893488147419103232", Name("overflow with extremely large int value")), + ("-36893488147419103232", Name("overflow with extremely large negative int value")), + ("@#", Name("special characters")) +]) +@Requirements( + RQ_SRS_007_LDAP_Configuration_Server_VerificationCooldown_Invalid("1.0") +) +def invalid_verification_cooldown_value(self, invalid_value, timeout=20): + """Check that server returns an error when LDAP server + verification cooldown parameter is invalid. + """ + + error_message = (" Access(user directories): Could not parse LDAP server" + " \\`openldap1\\`: Poco::Exception. 
Code: 1000, e.code() = 0," + f" e.displayText() = Syntax error: Not a valid unsigned integer{': ' + invalid_value if invalid_value else invalid_value}") + + with Given("LDAP server configuration that uses a negative integer for the verification_cooldown parameter"): + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": f"{invalid_value}" + }} + + with When("I try to use this configuration then it should not work"): + invalid_server_config(servers, message=error_message, tail=17, timeout=timeout) + @TestScenario @Requirements( - RQ_SRS_007_LDAP_Configuration_Server_Syntax("1.0") + RQ_SRS_007_LDAP_Configuration_Server_Syntax("2.0") ) def syntax(self): """Check that server configuration with valid syntax can be loaded. @@ -230,6 +260,7 @@ def syntax(self): 636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -248,6 +279,7 @@ def syntax(self): "port": "389", "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "0", "enable_tls": "yes", "tls_minimum_protocol_version": "tls1.2" , "tls_require_cert": "demand", diff --git a/tests/testflows/ldap/external_user_directory/requirements/requirements.md b/tests/testflows/ldap/external_user_directory/requirements/requirements.md index 46532c3945d..cf9650f2cae 100644 --- a/tests/testflows/ldap/external_user_directory/requirements/requirements.md +++ b/tests/testflows/ldap/external_user_directory/requirements/requirements.md @@ -80,20 +80,23 @@ * 4.2.3.26 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlscacertdir) * 4.2.3.27 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlscacertfile) * 4.2.3.28 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlsciphersuite) - * 4.2.3.29 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationserversyntax) - * 4.2.3.30 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectory) - * 4.2.3.31 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectorymorethanone) - * 4.2.3.32 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationuserssyntax) - * 4.2.3.33 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserver) - * 4.2.3.34 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverempty) - * 4.2.3.35 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermissing) - * 4.2.3.36 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermorethanone) - * 4.2.3.37 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverinvalid) - * 4.2.3.38 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersroles) - * 4.2.3.39 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmorethanone) - * 
4.2.3.40 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesinvalid) - * 4.2.3.41 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesempty) - * 4.2.3.42 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmissing) + * 4.2.3.29 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldown) + * 4.2.3.30 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldowndefault) + * 4.2.3.31 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldowninvalid) + * 4.2.3.32 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationserversyntax) + * 4.2.3.33 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectory) + * 4.2.3.34 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectorymorethanone) + * 4.2.3.35 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationuserssyntax) + * 4.2.3.36 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserver) + * 4.2.3.37 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverempty) + * 4.2.3.38 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermissing) + * 4.2.3.39 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermorethanone) + * 4.2.3.40 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverinvalid) + * 4.2.3.41 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersroles) + * 4.2.3.42 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmorethanone) + * 4.2.3.43 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesinvalid) + * 4.2.3.44 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesempty) + * 4.2.3.45 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmissing) * 4.2.4 [Authentication](#authentication) * 4.2.4.1 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty](#rqsrs-009ldapexternaluserdirectoryauthenticationusernameempty) * 4.2.4.2 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long](#rqsrs-009ldapexternaluserdirectoryauthenticationusernamelong) @@ -101,6 +104,9 @@ * 4.2.4.4 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordempty) * 4.2.4.5 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordlong) * 4.2.4.6 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordutf8) + * 4.2.4.7 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownperformance) + * 4.2.4.8 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownresetchangeincoreserverparameters) + * 4.2.4.9 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownresetinvalidpassword) * 5 [References](#references) ## Revision History @@ -556,9 +562,44 @@ For example, The available suites SHALL depend on the [OpenSSL] library version and variant used to build [ClickHouse] and therefore might change. -##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown version: 1.0 +[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section +that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed +to be successfully authenticated for all consecutive requests without contacting the [LDAP] server. +After period of time since the last successful attempt expires then on the authentication attempt +SHALL result in contacting the [LDAP] server to verify the username and password. 
+ +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default +version: 1.0 + +[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section +SHALL have a default value of `0` that disables caching and forces contacting +the [LDAP] server for each authentication request. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid +version: 1.0 + +[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer. + +For example: + +* negative integer +* string +* empty value +* extremely large positive value (overflow) +* extremely large negative value (overflow) + +The error SHALL appear in the log and SHALL be similar to the following: + +```bash + Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value* +``` + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax +version: 2.0 + [ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml` configuration file or of any configuration file inside the `config.d` directory. @@ -569,6 +610,7 @@ configuration file or of any configuration file inside the `config.d` directory. 636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -717,6 +759,33 @@ version: 1.0 [ClickHouse] SHALL support [UTF-8] characters in passwords used to authenticate users when using [LDAP] external user directory. 
+##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance +version: 1.0 + +[ClickHouse] SHALL provide better login performance of users authenticated using [LDAP] external user directory +when `verification_cooldown` parameter is set to a positive value when comparing +to the the case when `verification_cooldown` is turned off either for a single user or multiple users +making a large number of repeated requests. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters +version: 1.0 + +[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values +change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user +to result in contacting the [LDAP] server to verify user's username and password. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword +version: 1.0 + +[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +for the user if the password provided in the current authentication attempt does not match +the valid password provided during the first successful authentication request that was cached +for this exact user. The reset SHALL cause the next authentication attempt for this user +to result in contacting the [LDAP] server to verify user's username and password. 
+ ## References * **Access Control and Account Management**: https://clickhouse.tech/docs/en/operations/access-rights/ diff --git a/tests/testflows/ldap/external_user_directory/requirements/requirements.py b/tests/testflows/ldap/external_user_directory/requirements/requirements.py index 3b77685188e..0e9f9320b14 100644 --- a/tests/testflows/ldap/external_user_directory/requirements/requirements.py +++ b/tests/testflows/ldap/external_user_directory/requirements/requirements.py @@ -1073,9 +1073,74 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCipherSuite = Requ level=4, num='4.2.3.28') +RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed\n' + 'to be successfully authenticated for all consecutive requests without contacting the [LDAP] server.\n' + 'After period of time since the last successful attempt expires then on the authentication attempt\n' + 'SHALL result in contacting the [LDAP] server to verify the username and password.\n' + '\n' + ), + link=None, + level=4, + num='4.2.3.29') + +RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'SHALL have a default value of `0` that disables caching and forces contacting\n' + 'the [LDAP] server for each authentication request.\n' 
+ '\n' + ), + link=None, + level=4, + num='4.2.3.30') + +RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer.\n' + '\n' + 'For example:\n' + '\n' + '* negative integer\n' + '* string\n' + '* empty value\n' + '* extremely large positive value (overflow)\n' + '* extremely large negative value (overflow)\n' + '\n' + 'The error SHALL appear in the log and SHALL be similar to the following:\n' + '\n' + '```bash\n' + ' Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value*\n' + '```\n' + '\n' + ), + link=None, + level=4, + num='4.2.3.31') + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax', - version='1.0', + version='2.0', priority=None, group=None, type=None, @@ -1091,6 +1156,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax = Requirement( ' 636\n' ' cn=\n' ' , ou=users, dc=example, dc=com\n' + ' 0\n' ' yes\n' ' tls1.2\n' ' demand\n' @@ -1106,7 +1172,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax = Requirement( ), link=None, level=4, - num='4.2.3.29') + num='4.2.3.32') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory', @@ -1122,7 +1188,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory = Re ), link=None, level=4, - num='4.2.3.30') + num='4.2.3.33') 
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne', @@ -1139,7 +1205,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_More ), link=None, level=4, - num='4.2.3.31') + num='4.2.3.34') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Syntax = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax', @@ -1168,7 +1234,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Syntax = Requirement( ), link=None, level=4, - num='4.2.3.32') + num='4.2.3.35') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server', @@ -1185,7 +1251,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server = Re ), link=None, level=4, - num='4.2.3.33') + num='4.2.3.36') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Empty = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty', @@ -1201,7 +1267,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Empt ), link=None, level=4, - num='4.2.3.34') + num='4.2.3.37') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Missing = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing', @@ -1217,7 +1283,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Miss ), link=None, level=4, - num='4.2.3.35') + num='4.2.3.38') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_MoreThanOne = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne', @@ -1233,7 +1299,7 @@ 
RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_More ), link=None, level=4, - num='4.2.3.36') + num='4.2.3.39') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Invalid = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid', @@ -1249,7 +1315,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Server_Inva ), link=None, level=4, - num='4.2.3.37') + num='4.2.3.40') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles', @@ -1266,7 +1332,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles = Req ), link=None, level=4, - num='4.2.3.38') + num='4.2.3.41') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_MoreThanOne = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne', @@ -1283,7 +1349,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_MoreT ), link=None, level=4, - num='4.2.3.39') + num='4.2.3.42') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Invalid = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid', @@ -1299,7 +1365,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Inval ), link=None, level=4, - num='4.2.3.40') + num='4.2.3.43') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Empty = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty', @@ -1316,7 +1382,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Empty ), link=None, level=4, - num='4.2.3.41') + num='4.2.3.44') RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Missing = Requirement( 
name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing', @@ -1333,7 +1399,7 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_Parameters_Roles_Missi ), link=None, level=4, - num='4.2.3.42') + num='4.2.3.45') RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Username_Empty = Requirement( name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty', @@ -1432,6 +1498,63 @@ RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_UTF8 = Requirement level=4, num='4.2.4.6') +RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL provide better login performance of users authenticated using [LDAP] external user directory\n' + 'when `verification_cooldown` parameter is set to a positive value when comparing\n' + 'to the the case when `verification_cooldown` is turned off either for a single user or multiple users\n' + 'making a large number of repeated requests.\n' + '\n' + ), + link=None, + level=4, + num='4.2.4.7') + +RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the\n' + '`verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values\n' + 'change in the configuration file. 
The reset SHALL cause any subsequent authentication attempts for any user\n' + "to result in contacting the [LDAP] server to verify user's username and password.\n" + '\n' + ), + link=None, + level=4, + num='4.2.4.8') + +RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword = Requirement( + name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword', + version='1.0', + priority=None, + group=None, + type=None, + uid=None, + description=( + '[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the\n' + '`verification_cooldown` parameter in the [LDAP] server configuration section\n' + 'for the user if the password provided in the current authentication attempt does not match\n' + 'the valid password provided during the first successful authentication request that was cached\n' + 'for this exact user. The reset SHALL cause the next authentication attempt for this user\n' + "to result in contacting the [LDAP] server to verify user's username and password.\n" + '\n' + ), + link=None, + level=4, + num='4.2.4.9') + SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( name='SRS-009 ClickHouse LDAP External User Directory', description=None, @@ -1526,20 +1649,23 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir', level=4, num='4.2.3.26'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile', level=4, num='4.2.3.27'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite', level=4, num='4.2.3.28'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax', level=4, num='4.2.3.29'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory', level=4, num='4.2.3.30'), - 
Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne', level=4, num='4.2.3.31'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax', level=4, num='4.2.3.32'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server', level=4, num='4.2.3.33'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty', level=4, num='4.2.3.34'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing', level=4, num='4.2.3.35'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne', level=4, num='4.2.3.36'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid', level=4, num='4.2.3.37'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles', level=4, num='4.2.3.38'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne', level=4, num='4.2.3.39'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid', level=4, num='4.2.3.40'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty', level=4, num='4.2.3.41'), - Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing', level=4, num='4.2.3.42'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown', level=4, num='4.2.3.29'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default', level=4, num='4.2.3.30'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid', level=4, num='4.2.3.31'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax', level=4, num='4.2.3.32'), 
+ Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory', level=4, num='4.2.3.33'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne', level=4, num='4.2.3.34'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax', level=4, num='4.2.3.35'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server', level=4, num='4.2.3.36'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty', level=4, num='4.2.3.37'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing', level=4, num='4.2.3.38'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne', level=4, num='4.2.3.39'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid', level=4, num='4.2.3.40'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles', level=4, num='4.2.3.41'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne', level=4, num='4.2.3.42'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid', level=4, num='4.2.3.43'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty', level=4, num='4.2.3.44'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing', level=4, num='4.2.3.45'), Heading(name='Authentication', level=3, num='4.2.4'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty', level=4, num='4.2.4.1'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long', level=4, num='4.2.4.2'), @@ -1547,6 +1673,9 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( 
Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty', level=4, num='4.2.4.4'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long', level=4, num='4.2.4.5'), Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8', level=4, num='4.2.4.6'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance', level=4, num='4.2.4.7'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters', level=4, num='4.2.4.8'), + Heading(name='RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword', level=4, num='4.2.4.9'), Heading(name='References', level=1, num='5'), ), requirements=( @@ -1615,6 +1744,9 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertDir, RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCACertFile, RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_TLSCipherSuite, + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown, + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default, + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid, RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax, RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory, RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Users_LDAPUserDirectory_MoreThanOne, @@ -1635,6 +1767,9 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Empty, RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_Long, RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_Password_UTF8, + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance, + 
RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters, + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword, ), content=''' # SRS-009 ClickHouse LDAP External User Directory @@ -1719,20 +1854,23 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( * 4.2.3.26 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertDir](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlscacertdir) * 4.2.3.27 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCACertFile](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlscacertfile) * 4.2.3.28 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.TLSCipherSuite](#rqsrs-009ldapexternaluserdirectoryconfigurationservertlsciphersuite) - * 4.2.3.29 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationserversyntax) - * 4.2.3.30 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectory) - * 4.2.3.31 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectorymorethanone) - * 4.2.3.32 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationuserssyntax) - * 4.2.3.33 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserver) - * 4.2.3.34 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverempty) - * 4.2.3.35 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermissing) - * 4.2.3.36 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermorethanone) - * 4.2.3.37 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverinvalid) - * 4.2.3.38 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersroles) - * 4.2.3.39 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmorethanone) - * 4.2.3.40 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesinvalid) - * 4.2.3.41 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesempty) - * 4.2.3.42 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmissing) + * 4.2.3.29 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldown) + * 4.2.3.30 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldowndefault) + * 4.2.3.31 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationserververificationcooldowninvalid) + * 4.2.3.32 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationserversyntax) + * 4.2.3.33 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectory) + * 4.2.3.34 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.LDAPUserDirectory.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersldapuserdirectorymorethanone) + * 4.2.3.35 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Syntax](#rqsrs-009ldapexternaluserdirectoryconfigurationuserssyntax) + * 4.2.3.36 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserver) + * 4.2.3.37 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverempty) + * 4.2.3.38 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermissing) + * 4.2.3.39 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersservermorethanone) + * 4.2.3.40 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Server.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersserverinvalid) + * 4.2.3.41 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersroles) + * 4.2.3.42 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.MoreThanOne](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmorethanone) + * 4.2.3.43 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Invalid](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesinvalid) + * 4.2.3.44 
[RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Empty](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesempty) + * 4.2.3.45 [RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Users.Parameters.Roles.Missing](#rqsrs-009ldapexternaluserdirectoryconfigurationusersparametersrolesmissing) * 4.2.4 [Authentication](#authentication) * 4.2.4.1 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Empty](#rqsrs-009ldapexternaluserdirectoryauthenticationusernameempty) * 4.2.4.2 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Username.Long](#rqsrs-009ldapexternaluserdirectoryauthenticationusernamelong) @@ -1740,6 +1878,9 @@ SRS_009_ClickHouse_LDAP_External_User_Directory = Specification( * 4.2.4.4 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Empty](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordempty) * 4.2.4.5 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.Long](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordlong) * 4.2.4.6 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.Password.UTF8](#rqsrs-009ldapexternaluserdirectoryauthenticationpasswordutf8) + * 4.2.4.7 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownperformance) + * 4.2.4.8 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownresetchangeincoreserverparameters) + * 4.2.4.9 [RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword](#rqsrs-009ldapexternaluserdirectoryauthenticationverificationcooldownresetinvalidpassword) * 5 [References](#references) ## Revision History @@ -2195,9 +2336,44 @@ For example, The available suites SHALL depend on the [OpenSSL] library version and variant used to build [ClickHouse] and therefore might 
change. -##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown version: 1.0 +[ClickHouse] SHALL support `verification_cooldown` parameter in the [LDAP] server configuration section +that SHALL define a period of time, in seconds, after a successful bind attempt, during which a user SHALL be assumed +to be successfully authenticated for all consecutive requests without contacting the [LDAP] server. +After period of time since the last successful attempt expires then on the authentication attempt +SHALL result in contacting the [LDAP] server to verify the username and password. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Default +version: 1.0 + +[ClickHouse] `verification_cooldown` parameter in the [LDAP] server configuration section +SHALL have a default value of `0` that disables caching and forces contacting +the [LDAP] server for each authentication request. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.VerificationCooldown.Invalid +version: 1.0 + +[Clickhouse] SHALL return an error if the value provided for the `verification_cooldown` parameter is not a valid positive integer. + +For example: + +* negative integer +* string +* empty value +* extremely large positive value (overflow) +* extremely large negative value (overflow) + +The error SHALL appear in the log and SHALL be similar to the following: + +```bash + Access(user directories): Could not parse LDAP server `openldap1`: Poco::Exception. Code: 1000, e.code() = 0, e.displayText() = Syntax error: Not a valid unsigned integer: *input value* +``` + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Configuration.Server.Syntax +version: 2.0 + [ClickHouse] SHALL support the following example syntax to create an entry for an [LDAP] server inside the `config.xml` configuration file or of any configuration file inside the `config.d` directory. 
@@ -2208,6 +2384,7 @@ configuration file or of any configuration file inside the `config.d` directory. 636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -2356,6 +2533,33 @@ version: 1.0 [ClickHouse] SHALL support [UTF-8] characters in passwords used to authenticate users when using [LDAP] external user directory. +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Performance +version: 1.0 + +[ClickHouse] SHALL provide better login performance of users authenticated using [LDAP] external user directory +when `verification_cooldown` parameter is set to a positive value when comparing +to the the case when `verification_cooldown` is turned off either for a single user or multiple users +making a large number of repeated requests. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.ChangeInCoreServerParameters +version: 1.0 + +[ClickHouse] SHALL reset any currently cached [LDAP] authentication bind requests enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +if either `host`, `port`, `auth_dn_prefix`, or `auth_dn_suffix` parameter values +change in the configuration file. The reset SHALL cause any subsequent authentication attempts for any user +to result in contacting the [LDAP] server to verify user's username and password. + +##### RQ.SRS-009.LDAP.ExternalUserDirectory.Authentication.VerificationCooldown.Reset.InvalidPassword +version: 1.0 + +[ClickHouse] SHALL reset current cached [LDAP] authentication bind request enabled by the +`verification_cooldown` parameter in the [LDAP] server configuration section +for the user if the password provided in the current authentication attempt does not match +the valid password provided during the first successful authentication request that was cached +for this exact user. 
The reset SHALL cause the next authentication attempt for this user +to result in contacting the [LDAP] server to verify user's username and password. + ## References * **Access Control and Account Management**: https://clickhouse.tech/docs/en/operations/access-rights/ diff --git a/tests/testflows/ldap/external_user_directory/tests/authentications.py b/tests/testflows/ldap/external_user_directory/tests/authentications.py index 531a1b2f3ea..8229947adf7 100644 --- a/tests/testflows/ldap/external_user_directory/tests/authentications.py +++ b/tests/testflows/ldap/external_user_directory/tests/authentications.py @@ -698,6 +698,460 @@ def empty_username_and_empty_password(self, server=None): """ login_and_execute_query(username="", password="") +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Default("1.0") +) +def default_verification_cooldown_value(self, server, rbac=False, timeout=20): + """Check that the default value (0) for the verification cooldown parameter + disables caching and forces contacting the LDAP server for each + authentication request. 
+ """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that uses the default verification_cooldown value (0)"): + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("when I try to login immediately with the old user password it should fail"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_cn_change(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set and the user cn is changed. 
+ """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + new_user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user cn in LDAP"): + new_user = change_user_cn_in_ldap(user, "testVCD2") + + with Then("when I try to login again with the old user cn it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if new_user is not None: + delete_user_from_ldap(new_user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_password_change(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set and the user 
password is changed. + """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("when I try to login again with the old password it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown("1.0") +) +def valid_verification_cooldown_value_ldap_unavailable(self, server, rbac=False, timeout=20): + """Check that we can perform requests without contacting the LDAP server + after successful authentication when the verification_cooldown parameter + is set, even when 
the LDAP server is offline. + """ + + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 2 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "2" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + try: + with And("then I stop the ldap server"): + self.context.ldap_node.stop() + + with Then("when I try to login again with the server offline it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("when I sleep for 2 seconds and try to log in, it should fail"): + time.sleep(2) + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message) + + finally: + with Finally("I start the ldap server back up"): + self.context.ldap_node.start() + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestOutline +def repeat_requests(self, server, iterations, vcd_value, rbac=False): + """Run repeated requests from some user to the LDAP server. 
+ """ + + user = None + + with Given(f"I have an LDAP configuration that sets verification_cooldown parameter to {vcd_value} sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": vcd_value + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with And("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with When(f"I login and execute some query {iterations} times"): + start_time = time.time() + r = self.context.node.command(f"time for i in {{1..{iterations}}}; do clickhouse client -q \"SELECT 1\" --user {user['cn']} --password {user['userpassword']} > /dev/null; done") + end_time = time.time() + + return end_time - start_time + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Performance("1.0") +) +def verification_cooldown_performance(self, server, rbac=False, iterations=5000): + """Check that login performance is better when the verification cooldown + parameter is set to a positive value when comparing to the case when + the verification cooldown parameter is turned off. 
+ """ + + vcd_time = 0 + no_vcd_time = 0 + + with Example(f"Repeated requests with verification cooldown parameter set to 600 seconds, {iterations} iterations"): + vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="600", rbac=rbac) + metric("login_with_vcd_value_600", units="seconds", value=vcd_time) + + with Example(f"Repeated requests with verification cooldown parameter set to 0 seconds, {iterations} iterations"): + no_vcd_time = repeat_requests(server=server, iterations=iterations, vcd_value="0", rbac=rbac) + metric("login_with_vcd_value_0", units="seconds", value=no_vcd_time) + + with Then("The performance with verification cooldown parameter set is better than the performance with no verification cooldown parameter."): + assert no_vcd_time > vcd_time, error() + + with And("Log the performance improvement as a percentage."): + metric("percentage_improvement", units="%", value=100*(no_vcd_time - vcd_time)/vcd_time) + +@TestOutline +def check_verification_cooldown_reset_on_core_server_parameter_change(self, server, + parameter_name, parameter_value, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after one of the core server + parameters is changed in the LDAP server configuration. 
+ """ + config_d_dir="/etc/clickhouse-server/config.d" + config_file="ldap_servers.xml" + error_message = "DB::Exception: {user}: Authentication failed: password is incorrect or there is no user with such name" + error_exitcode = 4 + user = None + config=None + updated_config=None + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + with And("LDAP authenticated user"): + users = [ + {"cn": f"testVCD_0", "userpassword": "testVCD_0"}, + {"cn": f"testVCD_1", "userpassword": "testVCD_1"} + ] + + with And("I create LDAP servers configuration file"): + config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + + with ldap_users(*users) as users: + with ldap_servers(servers=None, restart=False, config=config): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + with When("I login and execute a query"): + for user in users: + with By(f"as user {user['cn']}"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + for user in users: + with By(f"for user {user['cn']}"): + change_user_password_in_ldap(user, "newpassword") + + with And(f"I change the server {parameter_name} core parameter", description=f"{parameter_value}"): + servers["openldap1"][parameter_name] = parameter_value + + with And("I create an updated the config file that has a different server host name"): + updated_config = create_ldap_servers_config_content(servers, config_d_dir, config_file) + + with modify_config(updated_config, restart=False): + with Then("when I try to log in it should fail as cache should have been reset"): + 
for user in users: + with By(f"as user {user['cn']}"): + login_and_execute_query(username=user["cn"], password=user["userpassword"], + exitcode=error_exitcode, message=error_message.format(user=user["cn"])) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_host_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server host name + is changed in the LDAP server configuration. + """ + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="host", parameter_value="openldap2", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_port_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server port is changed in the + LDAP server configuration. + """ + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="port", parameter_value="9006", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_auth_dn_prefix_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server auth_dn_prefix + is changed in the LDAP server configuration. 
+ """ + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="auth_dn_prefix", parameter_value="cxx=", rbac=rbac) + +@TestScenario +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_ChangeInCoreServerParameters("1.0") +) +def verification_cooldown_reset_on_server_auth_dn_suffix_parameter_change(self, server, rbac=False): + """Check that the LDAP login cache is reset for all the LDAP authentication users + when verification_cooldown parameter is set after server auth_dn_suffix + is changed in the LDAP server configuration. + """ + check_verification_cooldown_reset_on_core_server_parameter_change(server=server, + parameter_name="auth_dn_suffix", + parameter_value=",ou=company,dc=users,dc=com", rbac=rbac) + +@TestScenario +@Name("verification cooldown reset when invalid password is provided") +@Tags("verification_cooldown") +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication_VerificationCooldown_Reset_InvalidPassword("1.0") +) +def scenario(self, server, rbac=False): + """Check that cached bind requests for the user are discarded when + the user provides invalid login credentials. 
+ """ + user = None + error_exitcode = 4 + error_message = "DB::Exception: testVCD: Authentication failed: password is incorrect or there is no user with such name" + + with Given("I have an LDAP configuration that sets verification_cooldown parameter to 600 sec"): + servers = { "openldap1": { + "host": "openldap1", + "port": "389", + "enable_tls": "no", + "auth_dn_prefix": "cn=", + "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "600" + }} + + self.context.ldap_node = self.context.cluster.node(server) + + try: + with Given("I add a new user to LDAP"): + user = {"cn": "testVCD", "userpassword": "testVCD"} + user = add_user_to_ldap(**user) + + with ldap_servers(servers): + with rbac_roles("ldap_role") as roles: + with ldap_external_user_directory(server=server, roles=roles, restart=True): + + with When("I login and execute a query"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("I change user password in LDAP"): + change_user_password_in_ldap(user, "newpassword") + + with Then("When I try to log in with the cached password it should work"): + login_and_execute_query(username=user["cn"], password=user["userpassword"]) + + with And("When I try to log in with an incorrect password it should fail"): + login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, + message=error_message) + + with And("When I try to log in with the cached password it should fail"): + login_and_execute_query(username=user["cn"], password="incorrect", exitcode=error_exitcode, + message=error_message) + + finally: + with Finally("I make sure LDAP user is deleted"): + if user is not None: + delete_user_from_ldap(user, exitcode=None) + @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Users_Lookup_Priority("2.0") @@ -734,7 +1188,6 @@ def user_lookup_priority(self, server): with When("I try to login as 'ldap' user defined only in LDAP it should work"): 
login_and_execute_query(**users["ldap"]) - @TestOutline(Feature) @Name("user authentications") @Requirements( @@ -755,8 +1208,11 @@ def feature(self, servers=None, server=None, node="clickhouse1"): with ldap_servers(servers): with rbac_roles("ldap_role") as roles: with ldap_external_user_directory(server=server, roles=roles, restart=True): - for scenario in loads(current_module(), Scenario, filter=~has.tag("custom config")): - Scenario(test=scenario, flags=TE)(server=server) + for scenario in loads(current_module(), Scenario, filter=~has.tag("custom config") & ~has.tag("verification_cooldown")): + Scenario(test=scenario)(server=server) for scenario in loads(current_module(), Scenario, filter=has.tag("custom config")): - Scenario(test=scenario, flags=TE)(server=server) + Scenario(test=scenario)(server=server) + + for scenario in loads(current_module(), Scenario, filter=has.tag("verification_cooldown")): + Scenario(test=scenario)(server=server) diff --git a/tests/testflows/ldap/external_user_directory/tests/common.py b/tests/testflows/ldap/external_user_directory/tests/common.py index 38b53ca6e9f..6d8a97e8611 100644 --- a/tests/testflows/ldap/external_user_directory/tests/common.py +++ b/tests/testflows/ldap/external_user_directory/tests/common.py @@ -5,10 +5,11 @@ from contextlib import contextmanager import testflows.settings as settings from testflows.core import * from testflows.asserts import error -from ldap.authentication.tests.common import getuid, Config, ldap_servers, add_config, restart +from ldap.authentication.tests.common import getuid, Config, ldap_servers, add_config, modify_config, restart from ldap.authentication.tests.common import xmltree, xml_indent, xml_append, xml_with_utf8 from ldap.authentication.tests.common import ldap_user, ldap_users, add_user_to_ldap, delete_user_from_ldap from ldap.authentication.tests.common import change_user_password_in_ldap, change_user_cn_in_ldap +from ldap.authentication.tests.common import 
create_ldap_servers_config_content from ldap.authentication.tests.common import randomword def join(tasks, timeout): diff --git a/tests/testflows/ldap/external_user_directory/tests/server_config.py b/tests/testflows/ldap/external_user_directory/tests/server_config.py index 5df343b53df..617c0ee32e5 100644 --- a/tests/testflows/ldap/external_user_directory/tests/server_config.py +++ b/tests/testflows/ldap/external_user_directory/tests/server_config.py @@ -99,7 +99,6 @@ def invalid_port(self): }] login(servers, "openldap1", *users) - @TestScenario @Requirements( RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Invalid("1.0"), @@ -232,9 +231,39 @@ def auth_dn_value(self): login(servers, "openldap1", user) +@TestOutline(Scenario) +@Examples("invalid_value", [ + ("-1", Name("negative int")), + ("foo", Name("string")), + ("", Name("empty string")), + ("36893488147419103232", Name("overflow with extremely large int value")), + ("-36893488147419103232", Name("overflow with extremely large negative int value")), + ("@#", Name("special characters")) +]) +@Requirements( + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_VerificationCooldown_Invalid("1.0") +) +def invalid_verification_cooldown_value(self, invalid_value, timeout=20): + """Check that server returns an error when LDAP server + verification cooldown parameter is invalid. + """ + + error_message = (" Access(user directories): Could not parse LDAP server" + " \\`openldap1\\`: Poco::Exception. 
Code: 1000, e.code() = 0," + f" e.displayText() = Syntax error: Not a valid unsigned integer{': ' + invalid_value if invalid_value else invalid_value}") + + with Given("LDAP server configuration that uses a negative integer for the verification_cooldown parameter"): + servers = {"openldap1": {"host": "openldap1", "port": "389", "enable_tls": "no", + "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": f"{invalid_value}" + }} + + with When("I try to use this configuration then it should not work"): + invalid_server_config(servers, message=error_message, tail=17, timeout=timeout) + @TestScenario @Requirements( - RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax("1.0") + RQ_SRS_009_LDAP_ExternalUserDirectory_Configuration_Server_Syntax("2.0") ) def syntax(self): """Check that server configuration with valid syntax can be loaded. @@ -245,6 +274,7 @@ def syntax(self): 636 cn= , ou=users, dc=example, dc=com + 0 yes tls1.2 demand @@ -263,6 +293,7 @@ def syntax(self): "port": "389", "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com", + "verification_cooldown": "0", "enable_tls": "yes", "tls_minimum_protocol_version": "tls1.2" , "tls_require_cert": "demand", diff --git a/utils/check-marks/main.cpp b/utils/check-marks/main.cpp index 5590616bde6..2b244dcf0b6 100644 --- a/utils/check-marks/main.cpp +++ b/utils/check-marks/main.cpp @@ -13,7 +13,7 @@ #include -/** This program checks correctness of .mrk (marks) file for corresponding compressed .bin file. +/** This program checks correctness of .mrk/.mrk2 (marks) file for corresponding compressed .bin file. 
*/ static void checkByCompressedReadBuffer(const std::string & mrk_path, const std::string & bin_path) @@ -42,9 +42,10 @@ static void checkByCompressedReadBuffer(const std::string & mrk_path, const std: out << ", has rows after " << index_granularity_rows; } - out << ".\n" << DB::flush; - bin_in.seek(offset_in_compressed_file, offset_in_decompressed_block); + out << ", decompressed size " << bin_in.available(); + + out << ".\n" << DB::flush; } } @@ -61,7 +62,7 @@ int main(int argc, char ** argv) if (options.count("help") || argc != 3) { - std::cout << "Usage: " << argv[0] << " file.mrk file.bin" << std::endl; + std::cout << "Usage: " << argv[0] << " file.mrk[2] file.bin" << std::endl; std::cout << desc << std::endl; return 1; } diff --git a/utils/check-style/check-duplicate-includes.sh b/utils/check-style/check-duplicate-includes.sh index df843ead623..64aca4d180d 100755 --- a/utils/check-style/check-duplicate-includes.sh +++ b/utils/check-style/check-duplicate-includes.sh @@ -3,4 +3,4 @@ ROOT_PATH=$(git rev-parse --show-toplevel) # Find duplicate include directives -find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done +find $ROOT_PATH/{src,base,programs,utils} -name '*.h' -or -name '*.cpp' | while read file; do grep -P '^#include ' $file | sort | uniq -c | grep -v -P '^\s+1\s' && echo $file; done | sed '/^[[:space:]]*$/d' diff --git a/utils/check-style/check-style b/utils/check-style/check-style index d3653deb980..ddad0a5ddac 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -85,16 +85,6 @@ find $ROOT_PATH -name '.gitmodules' | while read i; do grep -F 'url = ' $i | gre # There shouldn't be any code snippets under GPL or LGPL find $ROOT_PATH/{src,base,programs} -name '*.h' -or -name '*.cpp' 2>/dev/null | xargs grep -i -F 'General Public License' && echo "There shouldn't be any code snippets under GPL or 
LGPL" -# Check for typos in code -CURDIR=$(dirname "${BASH_SOURCE[0]}") -"${CURDIR}"/check-typos - -# Check sh tests with Shellcheck -(cd $ROOT_PATH/tests/queries/0_stateless/ && shellcheck --check-sourced --external-sources --severity info --exclude SC1071,SC2086 *.sh ../1_stateful/*.sh) - -# Check docker scripts with shellcheck -find "$ROOT_PATH/docker" -executable -type f -exec file -F' ' --mime-type {} \; | awk -F' ' '$2==" text/x-shellscript" {print $1}' | xargs shellcheck - # There shouldn't be any docker containers outside docker directory find $ROOT_PATH -not -path $ROOT_PATH'/docker*' -not -path $ROOT_PATH'/contrib*' -name Dockerfile -type f 2>/dev/null | xargs --no-run-if-empty -n1 echo "Please move Dockerfile to docker directory:" diff --git a/utils/check-style/shellcheck-run.sh b/utils/check-style/shellcheck-run.sh new file mode 100755 index 00000000000..c0063d4b191 --- /dev/null +++ b/utils/check-style/shellcheck-run.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +ROOT_PATH=$(git rev-parse --show-toplevel) +EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|memcpy/|consistent-hashing/|Parsers/New' +# Check sh tests with Shellcheck +(cd $ROOT_PATH/tests/queries/0_stateless/ && shellcheck --check-sourced --external-sources --severity info --exclude SC1071,SC2086,SC2016 *.sh ../1_stateful/*.sh) + +# Check docker scripts with shellcheck +find "$ROOT_PATH/docker" -executable -type f -exec file -F' ' --mime-type {} \; | awk -F' ' '$2==" text/x-shellscript" {print $1}' | grep -v "entrypoint.alpine.sh" | grep -v "compare.sh"| xargs shellcheck + diff --git a/utils/compressor/main.cpp b/utils/compressor/main.cpp deleted file mode 100644 index 087bfa116de..00000000000 --- a/utils/compressor/main.cpp +++ /dev/null @@ -1,6 +0,0 @@ -int mainEntryClickHouseCompressor(int argc, char ** argv); - -int main(int argc, char ** argv) -{ - return mainEntryClickHouseCompressor(argc, argv); -} diff --git a/utils/db-generator/query_db_generator.cpp 
b/utils/db-generator/query_db_generator.cpp index b6e287278ff..33b8e6ce8af 100644 --- a/utils/db-generator/query_db_generator.cpp +++ b/utils/db-generator/query_db_generator.cpp @@ -56,9 +56,9 @@ std::string randomDate() int32_t year = rng() % 136 + 1970; int32_t month = rng() % 12 + 1; int32_t day = rng() % 12 + 1; - char ans[13]; - sprintf(ans, "'%04u-%02u-%02u'", year, month, day); - return std::string(ans); + char answer[13]; + sprintf(answer, "'%04u-%02u-%02u'", year, month, day); + return std::string(answer); } std::string randomDatetime() @@ -69,9 +69,9 @@ std::string randomDatetime() int32_t hours = rng() % 24; int32_t minutes = rng() % 60; int32_t seconds = rng() % 60; - char ans[22]; + char answer[22]; sprintf( - ans, + answer, "'%04u-%02u-%02u %02u:%02u:%02u'", year, month, @@ -79,7 +79,7 @@ std::string randomDatetime() hours, minutes, seconds); - return std::string(ans); + return std::string(answer); } TableAndColumn get_table_a_column(const std::string & c) {