diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 0fbcb95fc12..e5b797beebd 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -2870,6 +2870,216 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan0:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=0
+ RUN_BY_HASH_TOTAL=6
+ EOF
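+      # RUN_BY_HASH_NUM / RUN_BY_HASH_TOTAL shard this check across six identical jobs;
+      # each job runs only the subset of integration tests that hashes into its bucket.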
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan1:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=1
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan2:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=2
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan3:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=3
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan4:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=4
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
+ IntegrationTestsAnalyzerAsan5:
+ needs: [BuilderDebAsan]
+ runs-on: [self-hosted, stress-tester]
+ steps:
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ TEMP_PATH=${{runner.temp}}/integration_tests_asan
+ REPORTS_PATH=${{runner.temp}}/reports_dir
+ CHECK_NAME=Integration tests (asan, analyzer)
+ REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
+ RUN_BY_HASH_NUM=5
+ RUN_BY_HASH_TOTAL=6
+ EOF
+ - name: Download json reports
+ uses: actions/download-artifact@v3
+ with:
+ path: ${{ env.REPORTS_PATH }}
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ with:
+ clear-repository: true
+ - name: Integration test
+ run: |
+ sudo rm -fr "$TEMP_PATH"
+ mkdir -p "$TEMP_PATH"
+ cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+ cd "$REPO_COPY/tests/ci"
+ python3 integration_test_check.py "$CHECK_NAME"
+ - name: Cleanup
+ if: always()
+ run: |
+ docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+ docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+ sudo rm -fr "$TEMP_PATH"
IntegrationTestsTsan0:
needs: [BuilderDebTsan]
runs-on: [self-hosted, stress-tester]
@@ -3963,6 +4173,12 @@ jobs:
- IntegrationTestsAsan3
- IntegrationTestsAsan4
- IntegrationTestsAsan5
+ - IntegrationTestsAnalyzerAsan0
+ - IntegrationTestsAnalyzerAsan1
+ - IntegrationTestsAnalyzerAsan2
+ - IntegrationTestsAnalyzerAsan3
+ - IntegrationTestsAnalyzerAsan4
+ - IntegrationTestsAnalyzerAsan5
- IntegrationTestsRelease0
- IntegrationTestsRelease1
- IntegrationTestsRelease2
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index d8030c12128..dd834959578 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -5099,6 +5099,12 @@ jobs:
- IntegrationTestsAsan3
- IntegrationTestsAsan4
- IntegrationTestsAsan5
+ - IntegrationTestsAnalyzerAsan0
+ - IntegrationTestsAnalyzerAsan1
+ - IntegrationTestsAnalyzerAsan2
+ - IntegrationTestsAnalyzerAsan3
+ - IntegrationTestsAnalyzerAsan4
+ - IntegrationTestsAnalyzerAsan5
- IntegrationTestsRelease0
- IntegrationTestsRelease1
- IntegrationTestsRelease2
diff --git a/cmake/limit_jobs.cmake b/cmake/limit_jobs.cmake
index 3a33b3b9989..acc38b6fa2a 100644
--- a/cmake/limit_jobs.cmake
+++ b/cmake/limit_jobs.cmake
@@ -1,43 +1,38 @@
-# Usage:
-# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # In megabytes
-# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "")
-# include (cmake/limit_jobs.cmake)
+# Limit compiler/linker job concurrency to avoid OOMs on subtrees where compilation/linking is memory-intensive.
+#
+# Usage from CMake:
+# set (MAX_COMPILER_MEMORY 2000 CACHE INTERNAL "") # megabytes
+# set (MAX_LINKER_MEMORY 3500 CACHE INTERNAL "") # megabytes
+# include (cmake/limit_jobs.cmake)
+#
+# (bigger values mean fewer jobs)
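+# Example: a host with 32768 MB of RAM and MAX_COMPILER_MEMORY=2000 gets
+# 32768/2000 = 16 concurrent compile jobs (integer division).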
-cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY) # Not available under freebsd
+cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY)
cmake_host_system_information(RESULT NUMBER_OF_LOGICAL_CORES QUERY NUMBER_OF_LOGICAL_CORES)
-# 1 if not set
-option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" "")
+# Setting these explicitly disables the automatic job limiting below.
+option(PARALLEL_COMPILE_JOBS "Maximum number of concurrent compilation jobs" OFF)
+option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" OFF)
-# 1 if not set
-option(PARALLEL_LINK_JOBS "Maximum number of concurrent link jobs" "")
-
-if (NOT PARALLEL_COMPILE_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_COMPILER_MEMORY)
+if (NOT PARALLEL_COMPILE_JOBS AND MAX_COMPILER_MEMORY)
math(EXPR PARALLEL_COMPILE_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_COMPILER_MEMORY})
if (NOT PARALLEL_COMPILE_JOBS)
set (PARALLEL_COMPILE_JOBS 1)
endif ()
- if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
- set (PARALLEL_COMPILE_JOBS_LESS TRUE)
+ if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+ message(WARNING "The auto-calculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
endif()
endif ()
-if (PARALLEL_COMPILE_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES))
- set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
- string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
- set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
-endif ()
-
-
-if (NOT PARALLEL_LINK_JOBS AND TOTAL_PHYSICAL_MEMORY AND MAX_LINKER_MEMORY)
+if (NOT PARALLEL_LINK_JOBS AND MAX_LINKER_MEMORY)
math(EXPR PARALLEL_LINK_JOBS ${TOTAL_PHYSICAL_MEMORY}/${MAX_LINKER_MEMORY})
if (NOT PARALLEL_LINK_JOBS)
set (PARALLEL_LINK_JOBS 1)
endif ()
- if (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
- set (PARALLEL_LINK_JOBS_LESS TRUE)
+ if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+ message(WARNING "The auto-calculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
endif()
endif ()
@@ -52,20 +47,16 @@ if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND ENABLE_THINLTO AND PARALLE
set (PARALLEL_LINK_JOBS 2)
endif()
-if (PARALLEL_LINK_JOBS AND (NOT NUMBER_OF_LOGICAL_CORES OR PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES))
+message(STATUS "Building sub-tree with ${PARALLEL_COMPILE_JOBS} compile jobs and ${PARALLEL_LINK_JOBS} linker jobs (system: ${NUMBER_OF_LOGICAL_CORES} cores, ${TOTAL_PHYSICAL_MEMORY} MB DRAM, 'OFF' means the native core count).")
+
+if (PARALLEL_COMPILE_JOBS LESS NUMBER_OF_LOGICAL_CORES)
+ set(CMAKE_JOB_POOL_COMPILE compile_job_pool${CMAKE_CURRENT_SOURCE_DIR})
+ string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_COMPILE ${CMAKE_JOB_POOL_COMPILE})
+ set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_COMPILE}=${PARALLEL_COMPILE_JOBS})
+endif ()
+
+if (PARALLEL_LINK_JOBS LESS NUMBER_OF_LOGICAL_CORES)
set(CMAKE_JOB_POOL_LINK link_job_pool${CMAKE_CURRENT_SOURCE_DIR})
string (REGEX REPLACE "[^a-zA-Z0-9]+" "_" CMAKE_JOB_POOL_LINK ${CMAKE_JOB_POOL_LINK})
set_property(GLOBAL APPEND PROPERTY JOB_POOLS ${CMAKE_JOB_POOL_LINK}=${PARALLEL_LINK_JOBS})
endif ()
-
-if (PARALLEL_COMPILE_JOBS OR PARALLEL_LINK_JOBS)
- message(STATUS
- "${CMAKE_CURRENT_SOURCE_DIR}: Have ${TOTAL_PHYSICAL_MEMORY} megabytes of memory.
- Limiting concurrent linkers jobs to ${PARALLEL_LINK_JOBS} and compiler jobs to ${PARALLEL_COMPILE_JOBS} (system has ${NUMBER_OF_LOGICAL_CORES} logical cores)")
- if (PARALLEL_COMPILE_JOBS_LESS)
- message(WARNING "The autocalculated compile jobs limit (${PARALLEL_COMPILE_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_COMPILE_JOBS to override.")
- endif()
- if (PARALLEL_LINK_JOBS_LESS)
- message(WARNING "The autocalculated link jobs limit (${PARALLEL_LINK_JOBS}) underutilizes CPU cores (${NUMBER_OF_LOGICAL_CORES}). Set PARALLEL_LINK_JOBS to override.")
- endif()
-endif ()
diff --git a/contrib/cctz b/contrib/cctz
index 5e05432420f..8529bcef5cd 160000
--- a/contrib/cctz
+++ b/contrib/cctz
@@ -1 +1 @@
-Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
+Subproject commit 8529bcef5cd996b7c0f4d7475286b76b5d126c4c
diff --git a/docs/en/operations/backup.md b/docs/en/operations/backup.md
index c3ddee07d0b..62f931a76b4 100644
--- a/docs/en/operations/backup.md
+++ b/docs/en/operations/backup.md
@@ -30,7 +30,7 @@ description: In order to effectively mitigate possible human errors, you should
```
:::note ALL
-`ALL` is only applicable to the `RESTORE` command prior to version 23.4 of Clickhouse.
+Prior to version 23.4 of ClickHouse, `ALL` was only applicable to the `RESTORE` command.
:::
## Background
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 8b969f87a4d..22aeecf4335 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -4524,6 +4524,7 @@ This setting allows to specify renaming pattern for files processed by `file` ta
### Placeholders
+- `%a` — Full original filename (e.g., "sample.csv").
- `%f` — Original filename without extension (e.g., "sample").
- `%e` — Original file extension with dot (e.g., ".csv").
- `%t` — Timestamp (in microseconds).
diff --git a/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md b/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md
new file mode 100644
index 00000000000..3c71129bdb5
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md
@@ -0,0 +1,32 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/array_concat_agg
+sidebar_position: 110
+---
+
+# array_concat_agg
+- Alias of `groupArrayArray`. The function name is case-insensitive.
+
+**Example**
+
+```text
+SELECT *
+FROM t
+
+┌─a───────┐
+│ [1,2,3] │
+│ [4,5] │
+│ [6] │
+└─────────┘
+
+```
+
+Query:
+
+```sql
+SELECT array_concat_agg(a) AS a
+FROM t
+
+┌─a─────────────┐
+│ [1,2,3,4,5,6] │
+└───────────────┘
+```
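+
+Since `array_concat_agg` is an alias, the query above is equivalent to `SELECT groupArrayArray(a) AS a FROM t`.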
diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md
index 19eeda967fe..ce1a4f4d283 100644
--- a/docs/en/sql-reference/functions/date-time-functions.md
+++ b/docs/en/sql-reference/functions/date-time-functions.md
@@ -722,7 +722,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
## age
-Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
+Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 microsecond.
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
For an alternative to `age`, see function `date\_diff`.
@@ -738,6 +738,8 @@ age('unit', startdate, enddate, [timezone])
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
Possible values:
+ - `microsecond` (possible abbreviations: `us`, `u`)
+ - `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
@@ -813,6 +815,8 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
Possible values:
+ - `microsecond` (possible abbreviations: `us`, `u`)
+ - `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md
index 4f174a53ad6..9890d257e84 100644
--- a/docs/en/sql-reference/functions/string-functions.md
+++ b/docs/en/sql-reference/functions/string-functions.md
@@ -1267,3 +1267,36 @@ Like [initcap](#initcap), assuming that the string contains valid UTF-8 encoded
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
+
+## firstLine
+
+Returns the first line from a multi-line string.
+
+**Syntax**
+
+```sql
+firstLine(val)
+```
+
+**Arguments**
+
+- `val` - Input value. [String](../data-types/string.md)
+
+**Returned value**
+
+- The first line of the input value, or the whole value if it contains no line
+  separators. [String](../data-types/string.md)
+
+**Example**
+
+```sql
+select firstLine('foo\nbar\nbaz');
+```
+
+Result:
+
+```result
+┌─firstLine('foo\nbar\nbaz')─┐
+│ foo │
+└────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/statements/select/with.md b/docs/en/sql-reference/statements/select/with.md
index 4654f249548..a59ef463419 100644
--- a/docs/en/sql-reference/statements/select/with.md
+++ b/docs/en/sql-reference/statements/select/with.md
@@ -5,7 +5,27 @@ sidebar_label: WITH
# WITH Clause
-ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)), that is provides to use results of `WITH` clause in the rest of `SELECT` query. Named subqueries can be included to the current and child query context in places where table objects are allowed. Recursion is prevented by hiding the current level CTEs from the WITH expression.
+ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)): the code defined in the `WITH` clause is substituted at every place it is used in the rest of the `SELECT` query. Named subqueries can be included in the current and child query contexts in places where table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the `WITH` expression.
+
+Please note that CTEs do not guarantee the same results at every place they are referenced, because the query is re-executed for each use.
+
+An example of this behavior is shown below:
+``` sql
+with cte_numbers as
+(
+ select
+ num
+ from generateRandom('num UInt64', NULL)
+ limit 1000000
+)
+select
+ count()
+from cte_numbers
+where num in (select num from cte_numbers)
+```
+If CTEs passed along their exact results rather than just a piece of code, you would always see `1000000`.
+
+However, because `cte_numbers` is referenced twice, random numbers are generated for each reference and, accordingly, we see different random results: `280501, 392454, 261636, 196227` and so on.
## Syntax
diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md
index c78ffc1d61c..00917414e0c 100644
--- a/docs/en/sql-reference/table-functions/file.md
+++ b/docs/en/sql-reference/table-functions/file.md
@@ -134,7 +134,7 @@ Multiple path components can have globs. For being processed file must exist and
- `*` — Substitutes any number of any characters except `/` including empty string.
- `?` — Substitutes any single character.
-- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
+- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. The strings can contain the `/` symbol.
- `{N..M}` — Substitutes any number in range from N to M including both borders.
- `**` - Fetches all files inside the folder recursively.
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index 42e21f6140b..957a917c780 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -4201,6 +4201,7 @@ SELECT *, timezone() FROM test_tz WHERE d = '2000-01-01 00:00:00' SETTINGS sessi
### Шаблон
Шаблон поддерживает следующие виды плейсхолдеров:
+- `%a` — Полное исходное имя файла (например "sample.csv").
- `%f` — Исходное имя файла без расширения (например "sample").
- `%e` — Оригинальное расширение файла с точкой (например ".csv").
- `%t` — Текущее время (в микросекундах).
diff --git a/docs/ru/sql-reference/functions/date-time-functions.md b/docs/ru/sql-reference/functions/date-time-functions.md
index 779728ca0fe..4db8a1ec6f8 100644
--- a/docs/ru/sql-reference/functions/date-time-functions.md
+++ b/docs/ru/sql-reference/functions/date-time-functions.md
@@ -625,7 +625,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
## age
-Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 секунду.
+Вычисляет компонент `unit` разницы между `startdate` и `enddate`. Разница вычисляется с точностью в 1 микросекунду.
Например, разница между `2021-12-29` и `2022-01-01` 3 дня для единицы `day`, 0 месяцев для единицы `month`, 0 лет для единицы `year`.
**Синтаксис**
@@ -639,6 +639,8 @@ age('unit', startdate, enddate, [timezone])
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
Возможные значения:
+ - `microsecond` (возможные сокращения: `us`, `u`)
+ - `millisecond` (возможные сокращения: `ms`)
- `second` (возможные сокращения: `ss`, `s`)
- `minute` (возможные сокращения: `mi`, `n`)
- `hour` (возможные сокращения: `hh`, `h`)
@@ -712,6 +714,8 @@ date_diff('unit', startdate, enddate, [timezone])
- `unit` — единица измерения времени, в которой будет выражено возвращаемое значение функции. [String](../../sql-reference/data-types/string.md).
Возможные значения:
+ - `microsecond` (возможные сокращения: `us`, `u`)
+ - `millisecond` (возможные сокращения: `ms`)
- `second` (возможные сокращения: `ss`, `s`)
- `minute` (возможные сокращения: `mi`, `n`)
- `hour` (возможные сокращения: `hh`, `h`)
diff --git a/docs/ru/sql-reference/functions/string-functions.md b/docs/ru/sql-reference/functions/string-functions.md
index b872200f99b..276dfc2ef20 100644
--- a/docs/ru/sql-reference/functions/string-functions.md
+++ b/docs/ru/sql-reference/functions/string-functions.md
@@ -1124,3 +1124,39 @@ Do Nothing for 2 Minutes 2:00
Не учитывает язык. То есть, для турецкого языка, результат может быть не совсем верным.
Если длина UTF-8 последовательности байтов различна для верхнего и нижнего регистра кодовой точки, то для этой кодовой точки результат работы может быть некорректным.
Если строка содержит набор байтов, не являющийся UTF-8, то поведение не определено.
+
+## firstLine
+
+Возвращает первую строку в многострочном тексте.
+
+**Синтаксис**
+
+```sql
+firstLine(val)
+```
+
+**Аргументы**
+
+- `val` - текст для обработки. [String](../data-types/string.md)
+
+**Возвращаемое значение**
+
+- Первая строка текста или весь текст, если переносы строк отсутствуют.
+
+Тип: [String](../data-types/string.md)
+
+**Пример**
+
+Запрос:
+
+```sql
+select firstLine('foo\nbar\nbaz');
+```
+
+Результат:
+
+```result
+┌─firstLine('foo\nbar\nbaz')─┐
+│ foo │
+└────────────────────────────┘
+```
diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md
index 0983c51d954..83ef115aacd 100644
--- a/docs/ru/sql-reference/table-functions/file.md
+++ b/docs/ru/sql-reference/table-functions/file.md
@@ -79,7 +79,7 @@ SELECT * FROM file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 U
- `*` — заменяет любое количество любых символов кроме `/`, включая отсутствие символов.
- `?` — заменяет ровно один любой символ.
-- `{some_string,another_string,yet_another_one}` — заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`.
+- `{some_string,another_string,yet_another_one}` — заменяет любую из строк `'some_string', 'another_string', 'yet_another_one'`, причём строка может содержать `/`.
- `{N..M}` — заменяет любое число в интервале от `N` до `M` включительно (может содержать ведущие нули).
Конструкция с `{}` аналогична табличной функции [remote](remote.md).
diff --git a/docs/zh/sql-reference/functions/date-time-functions.md b/docs/zh/sql-reference/functions/date-time-functions.md
index 53dadc23c6d..e4b70322477 100644
--- a/docs/zh/sql-reference/functions/date-time-functions.md
+++ b/docs/zh/sql-reference/functions/date-time-functions.md
@@ -643,6 +643,8 @@ date_diff('unit', startdate, enddate, [timezone])
- `unit` — `value`对应的时间单位。类型为[String](../../sql-reference/data-types/string.md)。
可能的值:
+ - `microsecond`
+ - `millisecond`
- `second`
- `minute`
- `hour`
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 88ad7ebe100..259476b4e65 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -888,6 +888,7 @@ try
#endif
global_context->setRemoteHostFilter(config());
+ global_context->setHTTPHeaderFilter(config());
std::string path_str = getCanonicalPath(config().getString("path", DBMS_DEFAULT_PATH));
fs::path path = path_str;
@@ -1201,6 +1202,7 @@ try
}
global_context->setRemoteHostFilter(*config);
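+        /// Refresh the HTTP header filter on config reload, together with the remote host filter above.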
+ global_context->setHTTPHeaderFilter(*config);
global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop);
global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop);
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 3fda011f32b..193fd460d67 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -873,6 +873,14 @@
-->
+        [XML content lost in extraction: this hunk appears to add a commented-out example of the new HTTP header filtering section (cf. the setHTTPHeaderFilter() calls added in programs/server/Server.cpp and the forbidden_headers.xml linked in tests/config/install.sh).]

[The hunks that followed here belong to a second file whose "diff --git" header was also lost in extraction, most likely the storage configuration used by the stateless tests (e.g. tests/config/config.d/storage_conf.xml). From the surviving values: the s3_disk path is renamed from s3_common_disk/ to s3_disk/; the unused disks s3_disk_2 through s3_disk_6 and the caches s3_cache_2 through s3_cache_6, s3_cache_small and s3_cache_small_segment_size are removed together with their storage policies; and the s3_cache_multi cache is re-pointed from s3_cache_5 to s3_cache.]
diff --git a/tests/config/install.sh b/tests/config/install.sh
index 50f2627d37c..d75a652f084 100755
--- a/tests/config/install.sh
+++ b/tests/config/install.sh
@@ -51,6 +51,7 @@ ln -sf $SRC_PATH/config.d/session_log.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/system_unfreeze.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/enable_zero_copy_replication.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/nlp.xml $DEST_SERVER_PATH/config.d/
+ln -sf $SRC_PATH/config.d/forbidden_headers.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/enable_keeper_map.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/custom_disks_base_path.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/display_name.xml $DEST_SERVER_PATH/config.d/
diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py
index 60b46926589..e6e79dc7947 100644
--- a/tests/integration/helpers/network.py
+++ b/tests/integration/helpers/network.py
@@ -219,10 +219,15 @@ class _NetworkManager:
def __init__(
self,
- container_expire_timeout=120,
- container_exit_timeout=120,
+ container_expire_timeout=600,
+ container_exit_timeout=660,
docker_api_version=os.environ.get("DOCKER_API_VERSION"),
):
+        # The container should stay alive for at least 15 seconds longer than
+        # the expiration timeout; this protects against the container being
+        # destroyed just as some test is about to use it.
+ assert container_exit_timeout >= container_expire_timeout + 15
+
self.container_expire_timeout = container_expire_timeout
self.container_exit_timeout = container_exit_timeout
diff --git a/tests/integration/test_lost_part/test.py b/tests/integration/test_lost_part/test.py
index 0bc24268040..382539df7de 100644
--- a/tests/integration/test_lost_part/test.py
+++ b/tests/integration/test_lost_part/test.py
@@ -39,257 +39,261 @@ def test_lost_part_same_replica(start_cluster):
node1.query("DROP TABLE IF EXISTS mt0 SYNC")
node2.query("DROP TABLE IF EXISTS mt0 SYNC")
- for node in [node1, node2]:
- node.query(
- f"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{node.name}') ORDER BY tuple() PARTITION BY date "
- "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
- "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
- )
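+    # try/finally guarantees that the tables are dropped even if the test fails midway.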
+ try:
+ for node in [node1, node2]:
+ node.query(
+ f"CREATE TABLE mt0 (id UInt64, date Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/t', '{node.name}') ORDER BY tuple() PARTITION BY date "
+ "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
+ "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ )
- node1.query("SYSTEM STOP MERGES mt0")
- node2.query("SYSTEM STOP REPLICATION QUEUES")
+ node1.query("SYSTEM STOP MERGES mt0")
+ node2.query("SYSTEM STOP REPLICATION QUEUES")
- for i in range(5):
- node1.query(f"INSERT INTO mt0 VALUES ({i}, toDate('2020-10-01'))")
+ for i in range(5):
+ node1.query(f"INSERT INTO mt0 VALUES ({i}, toDate('2020-10-01'))")
- for i in range(20):
- parts_to_merge = node1.query(
- "SELECT parts_to_merge FROM system.replication_queue WHERE table='mt0' AND length(parts_to_merge) > 0"
- )
- if parts_to_merge:
- parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
- print("Got parts list", parts_list)
- if len(parts_list) < 3:
- raise Exception(f"Got too small parts list {parts_list}")
- break
- time.sleep(1)
+ for i in range(20):
+ parts_to_merge = node1.query(
+ "SELECT parts_to_merge FROM system.replication_queue WHERE table='mt0' AND length(parts_to_merge) > 0"
+ )
+ if parts_to_merge:
+ parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
+ print("Got parts list", parts_list)
+ if len(parts_list) < 3:
+ raise Exception(f"Got too small parts list {parts_list}")
+ break
+ time.sleep(1)
- victim_part_from_the_middle = random.choice(parts_list[1:-1])
- print("Will corrupt part", victim_part_from_the_middle)
+ victim_part_from_the_middle = random.choice(parts_list[1:-1])
+ print("Will corrupt part", victim_part_from_the_middle)
- remove_part_from_disk(node1, "mt0", victim_part_from_the_middle)
+ remove_part_from_disk(node1, "mt0", victim_part_from_the_middle)
- node1.query("DETACH TABLE mt0")
+ node1.query("DETACH TABLE mt0")
- node1.query("ATTACH TABLE mt0")
+ node1.query("ATTACH TABLE mt0")
- node1.query("SYSTEM START MERGES mt0")
- res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt0")
- print("result: ", res)
- print("error: ", res)
+ node1.query("SYSTEM START MERGES mt0")
+ res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt0")
+ print("result: ", res)
+        print("error: ", err)
- for i in range(10):
- result = node1.query("SELECT count() FROM system.replication_queue")
- if int(result) == 0:
- break
- time.sleep(1)
- else:
- assert False, "Still have something in replication queue:\n" + node1.query(
- "SELECT count() FROM system.replication_queue FORMAT Vertical"
- )
+ for i in range(10):
+ result = node1.query("SELECT count() FROM system.replication_queue")
+ if int(result) == 0:
+ break
+ time.sleep(1)
+ else:
+ assert False, "Still have something in replication queue:\n" + node1.query(
+ "SELECT count() FROM system.replication_queue FORMAT Vertical"
+ )
- assert node1.contains_in_log(
- "Created empty part"
- ), f"Seems like empty part {victim_part_from_the_middle} is not created or log message changed"
+ assert node1.contains_in_log(
+ "Created empty part"
+ ), f"Seems like empty part {victim_part_from_the_middle} is not created or log message changed"
- assert node1.query("SELECT COUNT() FROM mt0") == "4\n"
+ assert node1.query("SELECT COUNT() FROM mt0") == "4\n"
- node2.query("SYSTEM START REPLICATION QUEUES")
+ node2.query("SYSTEM START REPLICATION QUEUES")
- assert_eq_with_retry(node2, "SELECT COUNT() FROM mt0", "4")
- assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node1.query("DROP TABLE IF EXISTS mt0 SYNC")
- node2.query("DROP TABLE IF EXISTS mt0 SYNC")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM mt0", "4")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
+ finally:
+ node1.query("DROP TABLE IF EXISTS mt0 SYNC")
+ node2.query("DROP TABLE IF EXISTS mt0 SYNC")
def test_lost_part_other_replica(start_cluster):
node1.query("DROP TABLE IF EXISTS mt1 SYNC")
node2.query("DROP TABLE IF EXISTS mt1 SYNC")
- for node in [node1, node2]:
- node.query(
- f"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{node.name}') ORDER BY tuple() "
- "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
- "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ try:
+ for node in [node1, node2]:
+ node.query(
+ f"CREATE TABLE mt1 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t1', '{node.name}') ORDER BY tuple() "
+ "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
+ "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ )
+
+ node1.query("SYSTEM STOP MERGES mt1")
+ node2.query("SYSTEM STOP REPLICATION QUEUES")
+
+ for i in range(5):
+ node1.query(f"INSERT INTO mt1 VALUES ({i})")
+
+ for i in range(20):
+ parts_to_merge = node1.query(
+ "SELECT parts_to_merge FROM system.replication_queue WHERE table='mt1' AND length(parts_to_merge) > 0"
+ )
+ if parts_to_merge:
+ parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
+ print("Got parts list", parts_list)
+ if len(parts_list) < 3:
+ raise Exception("Got too small parts list {}".format(parts_list))
+ break
+ time.sleep(1)
+
+ victim_part_from_the_middle = random.choice(parts_list[1:-1])
+ print("Will corrupt part", victim_part_from_the_middle)
+
+ remove_part_from_disk(node1, "mt1", victim_part_from_the_middle)
+
+            # another way to detect broken parts
+ node1.query("CHECK TABLE mt1")
+
+ node2.query("SYSTEM START REPLICATION QUEUES")
+ res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt1")
+ print("result: ", res)
+        print("error: ", err)
+
+ for i in range(10):
+ result = node2.query("SELECT count() FROM system.replication_queue")
+ if int(result) == 0:
+ break
+ time.sleep(1)
+ else:
+ assert False, "Still have something in replication queue:\n" + node2.query(
+ "SELECT * FROM system.replication_queue FORMAT Vertical"
+ )
+
+ assert node1.contains_in_log(
+ "Created empty part"
+ ), "Seems like empty part {} is not created or log message changed".format(
+ victim_part_from_the_middle
)
- node1.query("SYSTEM STOP MERGES mt1")
- node2.query("SYSTEM STOP REPLICATION QUEUES")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM mt1", "4")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
- for i in range(5):
- node1.query(f"INSERT INTO mt1 VALUES ({i})")
+ node1.query("SYSTEM START MERGES mt1")
- for i in range(20):
- parts_to_merge = node1.query(
- "SELECT parts_to_merge FROM system.replication_queue WHERE table='mt1' AND length(parts_to_merge) > 0"
- )
- if parts_to_merge:
- parts_list = list(sorted(ast.literal_eval(parts_to_merge)))
- print("Got parts list", parts_list)
- if len(parts_list) < 3:
- raise Exception("Got too small parts list {}".format(parts_list))
- break
- time.sleep(1)
-
- victim_part_from_the_middle = random.choice(parts_list[1:-1])
- print("Will corrupt part", victim_part_from_the_middle)
-
- remove_part_from_disk(node1, "mt1", victim_part_from_the_middle)
-
- # other way to detect broken parts
- node1.query("CHECK TABLE mt1")
-
- node2.query("SYSTEM START REPLICATION QUEUES")
- res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt1")
- print("result: ", res)
- print("error: ", res)
-
- for i in range(10):
- result = node2.query("SELECT count() FROM system.replication_queue")
- if int(result) == 0:
- break
- time.sleep(1)
- else:
- assert False, "Still have something in replication queue:\n" + node2.query(
- "SELECT * FROM system.replication_queue FORMAT Vertical"
- )
-
- assert node1.contains_in_log(
- "Created empty part"
- ), "Seems like empty part {} is not created or log message changed".format(
- victim_part_from_the_middle
- )
-
- assert_eq_with_retry(node2, "SELECT COUNT() FROM mt1", "4")
- assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node1.query("SYSTEM START MERGES mt1")
-
- assert_eq_with_retry(node1, "SELECT COUNT() FROM mt1", "4")
- assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node1.query("DROP TABLE IF EXISTS mt1 SYNC")
- node2.query("DROP TABLE IF EXISTS mt1 SYNC")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM mt1", "4")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
+ finally:
+ node1.query("DROP TABLE IF EXISTS mt1 SYNC")
+ node2.query("DROP TABLE IF EXISTS mt1 SYNC")
def test_lost_part_mutation(start_cluster):
node1.query("DROP TABLE IF EXISTS mt2 SYNC")
node2.query("DROP TABLE IF EXISTS mt2 SYNC")
- for node in [node1, node2]:
- node.query(
- f"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{node.name}') ORDER BY tuple() "
- "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
- "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ try:
+ for node in [node1, node2]:
+ node.query(
+ f"CREATE TABLE mt2 (id UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/t2', '{node.name}') ORDER BY tuple() "
+ "SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
+ "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ )
+
+ node1.query("SYSTEM STOP MERGES mt2")
+ node2.query("SYSTEM STOP REPLICATION QUEUES")
+
+ for i in range(2):
+ node1.query(f"INSERT INTO mt2 VALUES ({i})")
+
+ node1.query(
+ "ALTER TABLE mt2 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
)
- node1.query("SYSTEM STOP MERGES mt2")
- node2.query("SYSTEM STOP REPLICATION QUEUES")
+ for i in range(20):
+ parts_to_mutate = node1.query(
+ "SELECT count() FROM system.replication_queue WHERE table='mt2'"
+ )
+ # two mutations for both replicas
+ if int(parts_to_mutate) == 4:
+ break
+ time.sleep(1)
- for i in range(2):
- node1.query(f"INSERT INTO mt2 VALUES ({i})")
+ remove_part_from_disk(node1, "mt2", "all_1_1_0")
- node1.query(
- "ALTER TABLE mt2 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
- )
+        # another way to detect broken parts
+ node1.query("CHECK TABLE mt2")
- for i in range(20):
- parts_to_mutate = node1.query(
- "SELECT count() FROM system.replication_queue WHERE table='mt2'"
- )
- # two mutations for both replicas
- if int(parts_to_mutate) == 4:
- break
- time.sleep(1)
+ node1.query("SYSTEM START MERGES mt2")
+ res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt2")
+ print("result: ", res)
+        print("error: ", err)
- remove_part_from_disk(node1, "mt2", "all_1_1_0")
+ for i in range(10):
+ result = node1.query("SELECT count() FROM system.replication_queue")
+ if int(result) == 0:
+ break
+ time.sleep(1)
+ else:
+ assert False, "Still have something in replication queue:\n" + node1.query(
+ "SELECT * FROM system.replication_queue FORMAT Vertical"
+ )
- # other way to detect broken parts
- node1.query("CHECK TABLE mt2")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM mt2", "1")
+ assert_eq_with_retry(node1, "SELECT SUM(id) FROM mt2", "777")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
- node1.query("SYSTEM START MERGES mt2")
- res, err = node1.query_and_get_answer_with_error("SYSTEM SYNC REPLICA mt2")
- print("result: ", res)
- print("error: ", res)
+ node2.query("SYSTEM START REPLICATION QUEUES")
- for i in range(10):
- result = node1.query("SELECT count() FROM system.replication_queue")
- if int(result) == 0:
- break
- time.sleep(1)
- else:
- assert False, "Still have something in replication queue:\n" + node1.query(
- "SELECT * FROM system.replication_queue FORMAT Vertical"
- )
-
- assert_eq_with_retry(node1, "SELECT COUNT() FROM mt2", "1")
- assert_eq_with_retry(node1, "SELECT SUM(id) FROM mt2", "777")
- assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node2.query("SYSTEM START REPLICATION QUEUES")
-
- assert_eq_with_retry(node2, "SELECT COUNT() FROM mt2", "1")
- assert_eq_with_retry(node2, "SELECT SUM(id) FROM mt2", "777")
- assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node1.query("DROP TABLE IF EXISTS mt2 SYNC")
- node2.query("DROP TABLE IF EXISTS mt2 SYNC")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM mt2", "1")
+ assert_eq_with_retry(node2, "SELECT SUM(id) FROM mt2", "777")
+ assert_eq_with_retry(node2, "SELECT COUNT() FROM system.replication_queue", "0")
+ finally:
+ node1.query("DROP TABLE IF EXISTS mt2 SYNC")
+ node2.query("DROP TABLE IF EXISTS mt2 SYNC")
def test_lost_last_part(start_cluster):
node1.query("DROP TABLE IF EXISTS mt3 SYNC")
node2.query("DROP TABLE IF EXISTS mt3 SYNC")
- for node in [node1, node2]:
- node.query(
- f"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{node.name}') "
- "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
- "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ try:
+ for node in [node1, node2]:
+ node.query(
+ f"CREATE TABLE mt3 (id UInt64, p String) ENGINE ReplicatedMergeTree('/clickhouse/tables/t3', '{node.name}') "
+ "ORDER BY tuple() PARTITION BY p SETTINGS cleanup_delay_period=1, cleanup_delay_period_random_add=1, cleanup_thread_preferred_points_per_iteration=0,"
+ "merge_selecting_sleep_ms=100, max_merge_selecting_sleep_ms=1000"
+ )
+
+ node1.query("SYSTEM STOP MERGES mt3")
+ node2.query("SYSTEM STOP REPLICATION QUEUES")
+
+ for i in range(1):
+ node1.query(f"INSERT INTO mt3 VALUES ({i}, 'x')")
+
+ # actually not important
+ node1.query(
+ "ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
)
- node1.query("SYSTEM STOP MERGES mt3")
- node2.query("SYSTEM STOP REPLICATION QUEUES")
+ partition_id = node1.query("select partitionId('x')").strip()
+ remove_part_from_disk(node1, "mt3", f"{partition_id}_0_0_0")
- for i in range(1):
- node1.query(f"INSERT INTO mt3 VALUES ({i}, 'x')")
+        # another way to detect broken parts
+ node1.query("CHECK TABLE mt3")
- # actually not important
- node1.query(
- "ALTER TABLE mt3 UPDATE id = 777 WHERE 1", settings={"mutations_sync": "0"}
- )
+ node1.query("SYSTEM START MERGES mt3")
- partition_id = node1.query("select partitionId('x')").strip()
- remove_part_from_disk(node1, "mt3", f"{partition_id}_0_0_0")
+ for i in range(100):
+ result = node1.query(
+ "SELECT count() FROM system.replication_queue WHERE table='mt3'"
+ )
+            assert int(result) <= 2, "Too many entries in queue {}".format(
+ node1.query("SELECT * FROM system.replication_queue FORMAT Vertical")
+ )
+ if node1.contains_in_log(
+ "Cannot create empty part"
+ ) and node1.contains_in_log("DROP/DETACH PARTITION"):
+ break
+ if node1.contains_in_log(
+ "Created empty part 8b8f0fede53df97513a9fb4cb19dc1e4_0_0_0 "
+ ):
+ break
+ time.sleep(0.5)
+ else:
+            assert False, "Required messages not found in the node1 log"
- # other way to detect broken parts
- node1.query("CHECK TABLE mt3")
+ node1.query(f"ALTER TABLE mt3 DROP PARTITION ID '{partition_id}'")
- node1.query("SYSTEM START MERGES mt3")
-
- for i in range(10):
- result = node1.query(
- "SELECT count() FROM system.replication_queue WHERE table='mt3'"
- )
- assert int(result) <= 2, "Have a lot of entries in queue {}".format(
- node1.query("SELECT * FROM system.replication_queue FORMAT Vertical")
- )
- if node1.contains_in_log("Cannot create empty part") and node1.contains_in_log(
- "DROP/DETACH PARTITION"
- ):
- break
- if node1.contains_in_log(
- "Created empty part 8b8f0fede53df97513a9fb4cb19dc1e4_0_0_0 "
- ):
- break
- time.sleep(1)
- else:
- assert False, "Don't have required messages in node1 log"
-
- node1.query(f"ALTER TABLE mt3 DROP PARTITION ID '{partition_id}'")
-
- assert_eq_with_retry(node1, "SELECT COUNT() FROM mt3", "0")
- assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
-
- node1.query("DROP TABLE IF EXISTS mt3 SYNC")
- node2.query("DROP TABLE IF EXISTS mt3 SYNC")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM mt3", "0")
+ assert_eq_with_retry(node1, "SELECT COUNT() FROM system.replication_queue", "0")
+ finally:
+ node1.query("DROP TABLE IF EXISTS mt3 SYNC")
+ node2.query("DROP TABLE IF EXISTS mt3 SYNC")
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
index 5ac1d3bea6f..8ff88791a3a 100644
--- a/tests/integration/test_storage_hdfs/test.py
+++ b/tests/integration/test_storage_hdfs/test.py
@@ -85,6 +85,32 @@ def test_read_write_storage_with_globs(started_cluster):
assert "in readonly mode" in str(ex)
+def test_storage_with_multidirectory_glob(started_cluster):
+ hdfs_api = started_cluster.hdfs_api
+ for i in ["1", "2"]:
+ hdfs_api.write_data(
+ f"/multiglob/p{i}/path{i}/postfix/data{i}", f"File{i}\t{i}{i}\n"
+ )
+ assert (
+ hdfs_api.read_data(f"/multiglob/p{i}/path{i}/postfix/data{i}")
+ == f"File{i}\t{i}{i}\n"
+ )
+
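+    # Each alternative in {p1/path1,p2/path2} spans multiple path components: '/' is allowed inside {}.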
+ r = node1.query(
+ "SELECT * FROM hdfs('hdfs://hdfs1:9000/multiglob/{p1/path1,p2/path2}/postfix/data{1,2}', TSV)"
+ )
+    assert (r == "File1\t11\nFile2\t22\n") or (r == "File2\t22\nFile1\t11\n")
+
+ try:
+ node1.query(
+ "SELECT * FROM hdfs('hdfs://hdfs1:9000/multiglob/{p4/path1,p2/path3}/postfix/data{1,2}.nonexist', TSV)"
+ )
+        assert False, "An exception should have been thrown"
+ except Exception as ex:
+ print(ex)
+ assert "no files" in str(ex)
+
+
def test_read_write_table(started_cluster):
hdfs_api = started_cluster.hdfs_api
diff --git a/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql
index 0ee8ba07006..a5e33bffb0d 100644
--- a/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql
+++ b/tests/queries/0_stateless/00502_custom_partitioning_replicated_zookeeper_long.sql
@@ -15,6 +15,7 @@ INSERT INTO not_partitioned_replica1_00502 VALUES (4), (5);
SELECT 'Parts before OPTIMIZE:';
SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'not_partitioned_replica1_00502' AND active ORDER BY name;
+SYSTEM SYNC REPLICA not_partitioned_replica1_00502 PULL;
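+-- PULL only fetches new entries from the replication log into the replica queue; it does not wait for them to be processed.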
SYSTEM SYNC REPLICA not_partitioned_replica2_00502;
OPTIMIZE TABLE not_partitioned_replica1_00502 PARTITION tuple() FINAL;
SELECT 'Parts after OPTIMIZE:';
@@ -42,6 +43,7 @@ INSERT INTO partitioned_by_week_replica1 VALUES ('2000-01-03', 4), ('2000-01-03'
SELECT 'Parts before OPTIMIZE:'; -- Select parts on the first replica to avoid waiting for replication.
SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_week_replica1' AND active ORDER BY name;
+SYSTEM SYNC REPLICA partitioned_by_week_replica1 PULL;
SYSTEM SYNC REPLICA partitioned_by_week_replica2;
OPTIMIZE TABLE partitioned_by_week_replica1 PARTITION '2000-01-03' FINAL;
SELECT 'Parts after OPTIMIZE:'; -- After OPTIMIZE with replication_alter_partitions_sync=2 replicas must be in sync.
@@ -68,6 +70,7 @@ INSERT INTO partitioned_by_tuple_replica1_00502 VALUES ('2000-01-02', 1, 4), ('2
SELECT 'Parts before OPTIMIZE:';
SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_tuple_replica1_00502' AND active ORDER BY name;
+SYSTEM SYNC REPLICA partitioned_by_tuple_replica1_00502 PULL;
SYSTEM SYNC REPLICA partitioned_by_tuple_replica2_00502;
OPTIMIZE TABLE partitioned_by_tuple_replica1_00502 PARTITION ('2000-01-01', 1) FINAL;
OPTIMIZE TABLE partitioned_by_tuple_replica1_00502 PARTITION ('2000-01-02', 1) FINAL;
@@ -95,6 +98,7 @@ INSERT INTO partitioned_by_string_replica1 VALUES ('bbb', 4), ('aaa', 5);
SELECT 'Parts before OPTIMIZE:';
SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_string_replica1' AND active ORDER BY name;
+SYSTEM SYNC REPLICA partitioned_by_string_replica1 PULL;
SYSTEM SYNC REPLICA partitioned_by_string_replica2;
OPTIMIZE TABLE partitioned_by_string_replica2 PARTITION 'aaa' FINAL;
SELECT 'Parts after OPTIMIZE:';
@@ -119,6 +123,7 @@ CREATE TABLE without_fixed_size_columns_replica2(s String) ENGINE ReplicatedMerg
INSERT INTO without_fixed_size_columns_replica1 VALUES ('a'), ('aa'), ('b'), ('cc');
-- Wait for replication.
+SYSTEM SYNC REPLICA without_fixed_size_columns_replica1 PULL;
SYSTEM SYNC REPLICA without_fixed_size_columns_replica2;
OPTIMIZE TABLE without_fixed_size_columns_replica2 PARTITION 1 FINAL;
diff --git a/tests/queries/0_stateless/00834_kill_mutation.reference b/tests/queries/0_stateless/00834_kill_mutation.reference
index 1685343c2b1..49fabab2f8a 100644
--- a/tests/queries/0_stateless/00834_kill_mutation.reference
+++ b/tests/queries/0_stateless/00834_kill_mutation.reference
@@ -2,7 +2,7 @@
1
waiting default kill_mutation mutation_3.txt DELETE WHERE toUInt32(s) = 1
*** Create and kill invalid mutation that blocks another mutation ***
-happened during execution of mutations 'mutation_4.txt, mutation_5.txt'
+happened during execution of mutation
1
waiting default kill_mutation mutation_4.txt DELETE WHERE toUInt32(s) = 1
2001-01-01 2 b
diff --git a/tests/queries/0_stateless/00834_kill_mutation.sh b/tests/queries/0_stateless/00834_kill_mutation.sh
index 46b10ccbff4..4c6455d2f53 100755
--- a/tests/queries/0_stateless/00834_kill_mutation.sh
+++ b/tests/queries/0_stateless/00834_kill_mutation.sh
@@ -27,8 +27,22 @@ ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id FROM system.mutations WHERE dat
${CLICKHOUSE_CLIENT} --query="SELECT '*** Create and kill invalid mutation that blocks another mutation ***'"
+# Note: there is a benign race condition.
+# The mutation can fail with the message
+# "Cannot parse string 'a' as UInt32"
+# or
+# "Cannot parse string 'b' as UInt32"
+# depending on which parts are processed first.
+# The mutations are also coalesced together, and the subsequent mutation inherits the failure status of the original mutation.
+# When we wait for mutations, we list all the mutations with identical error messages.
+# But due to this race condition and to repeated runs, the original and subsequent mutations can have different error messages,
+# in which case the original mutation will not be included in the list.
+
+# Originally, there was grep "happened during execution of mutations 'mutation_4.txt, mutation_5.txt'",
+# but due to this race condition, I've replaced it with grep "happened during execution of mutation".
+
${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE toUInt32(s) = 1"
-${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE x = 1 SETTINGS mutations_sync = 1" 2>&1 | grep -o "happened during execution of mutations 'mutation_4.txt, mutation_5.txt'" | head -n 1
+${CLICKHOUSE_CLIENT} --query="ALTER TABLE kill_mutation DELETE WHERE x = 1 SETTINGS mutations_sync = 1" 2>&1 | grep -o "happened during execution of mutation" | head -n 1
# but exception doesn't stop mutations, and we will still see them in system.mutations
${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' AND table = 'kill_mutation' AND mutation_id = 'mutation_4.txt'" # 1
diff --git a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
index 5f69427c0cd..79d2c736793 100755
--- a/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
+++ b/tests/queries/0_stateless/01076_parallel_alter_replicated_zookeeper.sh
@@ -138,8 +138,13 @@ while true ; do
done
for i in $(seq $REPLICAS); do
+ $CLICKHOUSE_CLIENT --query "SYSTEM SYNC REPLICA concurrent_mutate_mt_$i"
+    $CLICKHOUSE_CLIENT --query "CHECK TABLE concurrent_mutate_mt_$i" &> /dev/null # if CHECK TABLE removed something, the output of the following SELECTs would be wrong
$CLICKHOUSE_CLIENT --query "SELECT SUM(toUInt64(value1)) > $INITIAL_SUM FROM concurrent_mutate_mt_$i"
$CLICKHOUSE_CLIENT --query "SELECT COUNT() FROM system.mutations WHERE table='concurrent_mutate_mt_$i' and is_done=0" # all mutations have to be done
$CLICKHOUSE_CLIENT --query "SELECT * FROM system.mutations WHERE table='concurrent_mutate_mt_$i' and is_done=0" # for verbose output
+done
+
+for i in $(seq $REPLICAS); do
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS concurrent_mutate_mt_$i"
done
diff --git a/tests/queries/0_stateless/01410_full_join_and_null_predicates.reference b/tests/queries/0_stateless/01410_full_join_and_null_predicates.reference
new file mode 100644
index 00000000000..785d581c685
--- /dev/null
+++ b/tests/queries/0_stateless/01410_full_join_and_null_predicates.reference
@@ -0,0 +1,12 @@
+select 1
+\N 1232 Johny
+select 2
+\N 1232 Johny
+select 3
+\N 1232 Johny
+select 4
+\N 1232 Johny
+select 5
+\N 1232 Johny
+select 6
+\N 1232 Johny
diff --git a/tests/queries/0_stateless/01410_full_join_and_null_predicates.sql b/tests/queries/0_stateless/01410_full_join_and_null_predicates.sql
new file mode 100644
index 00000000000..f7d5fa67b1d
--- /dev/null
+++ b/tests/queries/0_stateless/01410_full_join_and_null_predicates.sql
@@ -0,0 +1,58 @@
+drop table if EXISTS l;
+drop table if EXISTS r;
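+
+-- Each query below must return exactly the right-only row (\N 1232 Johny):
+-- the IS NULL / IS NOT NULL predicates are applied to the result of the FULL JOIN.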
+
+CREATE TABLE l (luid Nullable(Int16), name String)
+ENGINE=MergeTree order by luid settings allow_nullable_key=1 as
+select * from VALUES ((1231, 'John'),(6666, 'Ksenia'),(Null, '---'));
+
+CREATE TABLE r (ruid Nullable(Int16), name String)
+ENGINE=MergeTree order by ruid settings allow_nullable_key=1 as
+select * from VALUES ((1231, 'John'),(1232, 'Johny'));
+
+select 'select 1';
+SELECT * FROM l full outer join r on l.luid = r.ruid
+where luid is null
+ and ruid is not null;
+
+select 'select 2';
+select * from (
+SELECT * FROM l full outer join r on l.luid = r.ruid)
+ where luid is null
+ and ruid is not null;
+
+select 'select 3';
+select * from (
+SELECT * FROM l full outer join r on l.luid = r.ruid
+limit 100000000)
+ where luid is null
+ and ruid is not null;
+
+drop table l;
+drop table r;
+
+CREATE TABLE l (luid Nullable(Int16), name String) ENGINE=MergeTree order by tuple() as
+select * from VALUES ((1231, 'John'),(6666, 'Ksenia'),(Null, '---'));
+
+CREATE TABLE r (ruid Nullable(Int16), name String) ENGINE=MergeTree order by tuple() as
+select * from VALUES ((1231, 'John'),(1232, 'Johny'));
+
+select 'select 4';
+SELECT * FROM l full outer join r on l.luid = r.ruid
+where luid is null
+ and ruid is not null;
+
+select 'select 5';
+select * from (
+SELECT * FROM l full outer join r on l.luid = r.ruid)
+ where luid is null
+ and ruid is not null;
+
+select 'select 6';
+select * from (
+SELECT * FROM l full outer join r on l.luid = r.ruid
+limit 100000000)
+ where luid is null
+ and ruid is not null;
+
+drop table l;
+drop table r;
diff --git a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
index 030db421683..aaf88f95f0c 100644
--- a/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
+++ b/tests/queries/0_stateless/01505_trivial_count_with_partition_predicate.sql
@@ -6,6 +6,7 @@ create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) or
insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3);
set max_rows_to_read = 1;
+set optimize_use_implicit_projections = 1;
-- non-optimized
select count() from test1 settings max_parallel_replicas = 3;
-- optimized (toYear is monotonic and we provide the partition expr as is)
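This hunk, and the matching ones in 01710, 01739, 01848, 02675 and 02680 below, all add the same opt-in: optimize_use_implicit_projections now gates the implicit minmax_count projection, so counts that used to be answered from part metadata unconditionally must enable it explicitly to stay within a max_rows_to_read budget. A sketch of the effect, assuming a running server; the table name is illustrative:

    clickhouse-client --multiquery <<'SQL'
    DROP TABLE IF EXISTS t_implicit_proj;
    CREATE TABLE t_implicit_proj (p DateTime, k Int32)
    ENGINE = MergeTree PARTITION BY toDate(p) ORDER BY k;
    INSERT INTO t_implicit_proj
    SELECT toDateTime('2020-09-01 00:00:00') + number, number FROM numbers(1000);
    SET optimize_use_implicit_projections = 1;
    -- The partition-pruned count is answered from part metadata, so it
    -- fits in a one-row read budget; per this change, that requires the
    -- SET above.
    SELECT count() FROM t_implicit_proj WHERE toDate(p) = '2020-09-01'
    SETTINGS max_rows_to_read = 1;
    DROP TABLE t_implicit_proj;
    SQL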
diff --git a/tests/queries/0_stateless/01710_minmax_count_projection.sql b/tests/queries/0_stateless/01710_minmax_count_projection.sql
index c17f0e1e1fb..bc8327e3631 100644
--- a/tests/queries/0_stateless/01710_minmax_count_projection.sql
+++ b/tests/queries/0_stateless/01710_minmax_count_projection.sql
@@ -4,7 +4,7 @@ create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple
insert into d select number, number from numbers(10000);
-set max_rows_to_read = 2, optimize_use_projections = 1;
+set max_rows_to_read = 2, optimize_use_projections = 1, optimize_use_implicit_projections = 1;
select min(i), max(i), count() from d;
select min(i), max(i), count() from d group by _partition_id order by _partition_id;
diff --git a/tests/queries/0_stateless/01739_index_hint.reference b/tests/queries/0_stateless/01739_index_hint.reference
index 71dfab29154..3a4b380de65 100644
--- a/tests/queries/0_stateless/01739_index_hint.reference
+++ b/tests/queries/0_stateless/01739_index_hint.reference
@@ -30,6 +30,6 @@ SELECT sum(t) FROM XXXX WHERE indexHint(t = 42);
drop table if exists XXXX;
create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192;
insert into XXXX select number*60, 0 from numbers(100000);
-SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
+SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;
100000
drop table XXXX;
diff --git a/tests/queries/0_stateless/01739_index_hint.sql b/tests/queries/0_stateless/01739_index_hint.sql
index 30dfa43d334..e1e66c630e1 100644
--- a/tests/queries/0_stateless/01739_index_hint.sql
+++ b/tests/queries/0_stateless/01739_index_hint.sql
@@ -30,6 +30,6 @@ create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings inde
insert into XXXX select number*60, 0 from numbers(100000);
-SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0));
+SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1;
drop table XXXX;
diff --git a/tests/queries/0_stateless/01825_type_json_ghdata.sh b/tests/queries/0_stateless/01825_type_json_ghdata.sh
index bdb439f756f..cea02131d86 100755
--- a/tests/queries/0_stateless/01825_type_json_ghdata.sh
+++ b/tests/queries/0_stateless/01825_type_json_ghdata.sh
@@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CUR_DIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata"
-${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1
+${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_object_type 1
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata FORMAT JSONAsObject"
diff --git a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh
index 487c95137ae..498e1db1f69 100755
--- a/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh
+++ b/tests/queries/0_stateless/01825_type_json_ghdata_insert_select.sh
@@ -9,9 +9,9 @@ ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_string"
${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata_2_from_string"
-${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1
-${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple()"
-${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple()" --allow_experimental_object_type 1
+${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2 (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_object_type 1
+${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_string (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'"
+${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata_2_from_string (data JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" --allow_experimental_object_type 1
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2 FORMAT JSONAsObject"
cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} -q "INSERT INTO ghdata_2_string FORMAT JSONAsString"
diff --git a/tests/queries/0_stateless/01848_partition_value_column.sql b/tests/queries/0_stateless/01848_partition_value_column.sql
index d13e4508789..28d842af3e9 100644
--- a/tests/queries/0_stateless/01848_partition_value_column.sql
+++ b/tests/queries/0_stateless/01848_partition_value_column.sql
@@ -5,6 +5,8 @@ create table tbl(dt DateTime, i int, j String, v Float64) engine MergeTree parti
insert into tbl values ('2021-04-01 00:01:02', 1, '123', 4), ('2021-04-01 01:01:02', 1, '12', 4), ('2021-04-01 02:11:02', 2, '345', 4), ('2021-04-01 04:31:02', 2, '2', 4), ('2021-04-02 00:01:02', 1, '1234', 4), ('2021-04-02 00:01:02', 2, '123', 4), ('2021-04-02 00:01:02', 3, '12', 4), ('2021-04-02 00:01:02', 4, '1', 4);
+set optimize_use_implicit_projections = 1;
+
select count() from tbl where _partition_value = ('2021-04-01', 1, 2) settings max_rows_to_read = 1;
select count() from tbl where _partition_value.1 = '2021-04-01' settings max_rows_to_read = 4;
select count() from tbl where _partition_value.2 = 0 settings max_rows_to_read = 4;
diff --git a/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.reference b/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.reference
index 997105c9da3..eb6c9305ebd 100644
--- a/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.reference
+++ b/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.reference
@@ -3,7 +3,20 @@
SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
+CREATE TABLE test (key UInt32, value String)
+Engine=MergeTree()
+ORDER BY key
+SETTINGS min_bytes_for_wide_part = 10485760,
+ compress_marks=false,
+ compress_primary_key=false,
+ disk = disk(
+ type = cache,
+ max_size = '128Mi',
+ path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
+ enable_bypass_cache_with_threashold = 1,
+ bypass_cache_threashold = 100,
+ delayed_cleanup_interval_ms = 100,
+ disk = 's3_disk');
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size;
diff --git a/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.sql b/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.sql
index f6671b82291..8871f8655dd 100644
--- a/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.sql
+++ b/tests/queries/0_stateless/02240_filesystem_cache_bypass_cache_threshold.sql
@@ -6,7 +6,21 @@ SYSTEM DROP FILESYSTEM CACHE;
SET enable_filesystem_cache_on_write_operations=0;
DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_6', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
+CREATE TABLE test (key UInt32, value String)
+Engine=MergeTree()
+ORDER BY key
+SETTINGS min_bytes_for_wide_part = 10485760,
+ compress_marks=false,
+ compress_primary_key=false,
+ disk = disk(
+ type = cache,
+ max_size = '128Mi',
+ path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
+ enable_bypass_cache_with_threashold = 1,
+ bypass_cache_threashold = 100,
+ delayed_cleanup_interval_ms = 100,
+ disk = 's3_disk');
+
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
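The rewritten CREATE TABLE drops the server-side storage policy in favor of an inline disk definition: disk = disk(type = cache, ..., disk = 's3_disk') layers a filesystem cache over an existing disk at table creation time, and the ${CLICKHOUSE_TEST_UNIQUE_NAME} path keeps each test's cache isolated from the others. The syntax in isolation, assuming a server whose config defines an 's3_disk' (size and path are illustrative):

    clickhouse-client --query "
        CREATE TABLE cache_demo (key UInt32, value String)
        ENGINE = MergeTree ORDER BY key
        SETTINGS disk = disk(
            type = cache,
            max_size = '128Mi',
            path = '/var/lib/clickhouse/cache_demo_cache',
            disk = 's3_disk')"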
diff --git a/tests/queries/0_stateless/02240_filesystem_query_cache.reference b/tests/queries/0_stateless/02240_filesystem_query_cache.reference
index 16c4cd1c049..26340c271e1 100644
--- a/tests/queries/0_stateless/02240_filesystem_query_cache.reference
+++ b/tests/queries/0_stateless/02240_filesystem_query_cache.reference
@@ -5,7 +5,20 @@ SET enable_filesystem_cache_on_write_operations=0;
SET skip_download_if_exceeds_query_cache=1;
SET filesystem_cache_max_download_size=128;
DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
+CREATE TABLE test (key UInt32, value String)
+Engine=MergeTree()
+ORDER BY key
+SETTINGS min_bytes_for_wide_part = 10485760,
+ compress_marks=false,
+ compress_primary_key=false,
+ disk = disk(
+ type = cache,
+ max_size = '128Mi',
+ path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
+ cache_on_write_operations= 1,
+ enable_filesystem_query_cache_limit = 1,
+ delayed_cleanup_interval_ms = 100,
+ disk = 's3_disk');
SYSTEM DROP FILESYSTEM CACHE;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
diff --git a/tests/queries/0_stateless/02240_filesystem_query_cache.sql b/tests/queries/0_stateless/02240_filesystem_query_cache.sql
index 44856a2188c..d85b3f543e1 100644
--- a/tests/queries/0_stateless/02240_filesystem_query_cache.sql
+++ b/tests/queries/0_stateless/02240_filesystem_query_cache.sql
@@ -8,7 +8,20 @@ SET skip_download_if_exceeds_query_cache=1;
SET filesystem_cache_max_download_size=128;
DROP TABLE IF EXISTS test;
-CREATE TABLE test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_4', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false;
+CREATE TABLE test (key UInt32, value String)
+Engine=MergeTree()
+ORDER BY key
+SETTINGS min_bytes_for_wide_part = 10485760,
+ compress_marks=false,
+ compress_primary_key=false,
+ disk = disk(
+ type = cache,
+ max_size = '128Mi',
+ path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
+ cache_on_write_operations= 1,
+ enable_filesystem_query_cache_limit = 1,
+ delayed_cleanup_interval_ms = 100,
+ disk = 's3_disk');
SYSTEM DROP FILESYSTEM CACHE;
INSERT INTO test SELECT number, toString(number) FROM numbers(100);
SELECT * FROM test FORMAT Null;
diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
index f960b4eb21c..93b6d4de94f 100644
--- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
+++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference
@@ -16,22 +16,6 @@ DOWNLOADED 0 79 80
DOWNLOADED 0 745 746
2
Expect no cache
-Expect cache
-DOWNLOADED 0 0 1
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-3
-Expect cache
-DOWNLOADED 0 0 1
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-3
-Expect no cache
-Expect cache
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-2
-Expect no cache
Using storage policy: local_cache
0
Expect cache
@@ -50,19 +34,3 @@ DOWNLOADED 0 79 80
DOWNLOADED 0 745 746
2
Expect no cache
-Expect cache
-DOWNLOADED 0 0 1
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-3
-Expect cache
-DOWNLOADED 0 0 1
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-3
-Expect no cache
-Expect cache
-DOWNLOADED 0 79 80
-DOWNLOADED 0 745 746
-2
-Expect no cache
diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
index c7dc9fbd961..6a94cffea5a 100755
--- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
+++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh
@@ -45,33 +45,4 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do
echo 'Expect no cache'
${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache"
- ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_02240_storage_policy_3"
- ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_02240_storage_policy_3 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='${STORAGE_POLICY}_3', min_bytes_for_wide_part = 1000000, compress_marks=false, compress_primary_key=false"
- ${CLICKHOUSE_CLIENT} --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02240_storage_policy_3 SELECT number, toString(number) FROM numbers(100)"
-
- echo 'Expect cache'
- ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE"
- ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null"
- ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size"
- ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache";
-
- echo 'Expect cache'
- ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE"
- ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null"
- ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size"
- ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache";
-
- echo 'Expect no cache'
- ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE"
- ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache"
-
- echo 'Expect cache'
- ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE"
- ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null"
- ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size"
- ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache";
-
- ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE"
- echo 'Expect no cache'
- ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache"
done
diff --git a/tests/queries/0_stateless/02273_full_sort_join.sql.j2 b/tests/queries/0_stateless/02273_full_sort_join.sql.j2
index 8b739330364..43f7354017c 100644
--- a/tests/queries/0_stateless/02273_full_sort_join.sql.j2
+++ b/tests/queries/0_stateless/02273_full_sort_join.sql.j2
@@ -1,4 +1,6 @@
--- Tags: long
+-- Tags: long, no-upgrade-check
+
+-- TODO(@vdimir): remove the no-upgrade-check tag after https://github.com/ClickHouse/ClickHouse/pull/51737 is released
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
index 62907a7c81c..b4e5b6715de 100644
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.reference
@@ -7,8 +7,6 @@ Using storage policy: s3_cache
1
1
0
-2
-0
Using storage policy: local_cache
0
2
@@ -18,5 +16,3 @@ Using storage policy: local_cache
1
1
0
-2
-0
diff --git a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
index a6fa0457078..1e1841862e9 100755
--- a/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
+++ b/tests/queries/0_stateless/02286_drop_filesystem_cache.sh
@@ -67,18 +67,4 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do
ON data_paths.cache_path = caches.cache_path"
$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_022862"
-
- $CLICKHOUSE_CLIENT -n --query "CREATE TABLE test_022862 (key UInt32, value String)
- Engine=MergeTree()
- ORDER BY key
- SETTINGS storage_policy='${STORAGE_POLICY}_2', min_bytes_for_wide_part = 10485760"
-
- $CLICKHOUSE_CLIENT --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_022862 SELECT number, toString(number) FROM numbers(100)"
- $CLICKHOUSE_CLIENT --query "SELECT * FROM test_022862 FORMAT Null"
- $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
-
- $CLICKHOUSE_CLIENT --query "SYSTEM DROP FILESYSTEM CACHE '${STORAGE_POLICY}_2'"
- $CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
-
- $CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS test_022862"
done
diff --git a/tests/queries/0_stateless/02344_describe_cache.reference b/tests/queries/0_stateless/02344_describe_cache.reference
index f1d0240d80e..da84cdabf79 100644
--- a/tests/queries/0_stateless/02344_describe_cache.reference
+++ b/tests/queries/0_stateless/02344_describe_cache.reference
@@ -1,2 +1 @@
134217728 10000000 33554432 4194304 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/ 100 2 0
-134217728 10000000 104857600 4194304 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/ 100 2 0
diff --git a/tests/queries/0_stateless/02344_describe_cache.sql b/tests/queries/0_stateless/02344_describe_cache.sql
index a687ad01394..9c5c5c10952 100644
--- a/tests/queries/0_stateless/02344_describe_cache.sql
+++ b/tests/queries/0_stateless/02344_describe_cache.sql
@@ -1,7 +1,4 @@
-- Tags: no-fasttest, no-parallel
SYSTEM DROP FILESYSTEM CACHE 's3_cache';
-SYSTEM DROP FILESYSTEM CACHE 's3_cache_2';
-
DESCRIBE FILESYSTEM CACHE 's3_cache';
-DESCRIBE FILESYSTEM CACHE 's3_cache_2';
diff --git a/tests/queries/0_stateless/02481_async_insert_dedup.python b/tests/queries/0_stateless/02481_async_insert_dedup.python
index 9fd82da1038..ca83253eaf8 100644
--- a/tests/queries/0_stateless/02481_async_insert_dedup.python
+++ b/tests/queries/0_stateless/02481_async_insert_dedup.python
@@ -39,7 +39,7 @@ client.query("DROP TABLE IF EXISTS t_async_insert_dedup_no_part SYNC")
# generate data and push to queue
-def generate_data(q, total_number):
+def generate_data(q, total_number, use_token):
old_data = []
max_chunk_size = 30
partitions = ["2022-11-11 10:10:10", "2022-12-12 10:10:10"]
@@ -63,12 +63,16 @@ def generate_data(q, total_number):
end = start + chunk_size
if end > total_number:
end = total_number
+
+ token = ""
for i in range(start, end + 1):
partition = partitions[random.randint(0, 1)]
insert_stmt += "('{}', {}),".format(partition, i)
+ if use_token:
+ token = str(i)
insert_stmt = insert_stmt[:-1]
- q.put(insert_stmt)
- old_data.append(insert_stmt)
+ q.put((insert_stmt, token))
+ old_data.append((insert_stmt, token))
last_number = end
if end >= total_number:
break
@@ -80,13 +84,14 @@ def fetch_and_insert_data(q, client):
while True:
insert = q.get()
client.query(
- insert,
+ insert[0],
settings={
"async_insert": 1,
"async_insert_deduplicate": 1,
"wait_for_async_insert": 0,
"async_insert_busy_timeout_ms": 1500,
"insert_keeper_fault_injection_probability": 0,
+ "insert_deduplication_token": insert[1],
},
)
q.task_done()
@@ -110,7 +115,11 @@ ORDER BY (KeyID, EventDate) SETTINGS use_async_block_ids_cache = 1
q = queue.Queue(100)
total_number = 10000
-gen = Thread(target=generate_data, args=[q, total_number])
+use_token = False
+if sys.argv[-1] == "token":
+ use_token = True
+
+gen = Thread(target=generate_data, args=[q, total_number, use_token])
gen.start()
for i in range(3):
diff --git a/tests/queries/0_stateless/02481_async_insert_dedup_token.reference b/tests/queries/0_stateless/02481_async_insert_dedup_token.reference
new file mode 100644
index 00000000000..a91c59a7fc5
--- /dev/null
+++ b/tests/queries/0_stateless/02481_async_insert_dedup_token.reference
@@ -0,0 +1,3 @@
+5
+
+10000
diff --git a/tests/queries/0_stateless/02481_async_insert_dedup_token.sh b/tests/queries/0_stateless/02481_async_insert_dedup_token.sh
new file mode 100755
index 00000000000..8ef6eecda24
--- /dev/null
+++ b/tests/queries/0_stateless/02481_async_insert_dedup_token.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Tags: long, zookeeper, no-parallel, no-fasttest
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+# We should have correct env vars from shell_config.sh to run this test
+python3 "$CURDIR"/02481_async_insert_dedup.python token
diff --git a/tests/queries/0_stateless/02497_trace_events_stress_long.sh b/tests/queries/0_stateless/02497_trace_events_stress_long.sh
index 3ec729079b8..91f6a9bb541 100755
--- a/tests/queries/0_stateless/02497_trace_events_stress_long.sh
+++ b/tests/queries/0_stateless/02497_trace_events_stress_long.sh
@@ -10,7 +10,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
function thread1()
{
thread_id=$1
- while true; do
+ local TIMELIMIT=$((SECONDS+$2))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
query_id="02497_$CLICKHOUSE_DATABASE-$RANDOM-$thread_id"
$CLICKHOUSE_CLIENT --query_id=$query_id --query "
SELECT count() FROM numbers_mt(100000) SETTINGS
@@ -25,7 +26,8 @@ function thread1()
function thread2()
{
- while true; do
+ local TIMELIMIT=$((SECONDS+$1))
+ while [ $SECONDS -lt "$TIMELIMIT" ]; do
$CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS"
done
}
@@ -35,29 +37,12 @@ export -f thread2
TIMEOUT=10
-timeout $TIMEOUT bash -c "thread1 0" >/dev/null &
-timeout $TIMEOUT bash -c "thread1 1" >/dev/null &
-timeout $TIMEOUT bash -c "thread1 2" >/dev/null &
-timeout $TIMEOUT bash -c "thread1 3" >/dev/null &
-timeout $TIMEOUT bash -c thread2 >/dev/null &
+thread1 0 $TIMEOUT >/dev/null &
+thread1 1 $TIMEOUT >/dev/null &
+thread1 2 $TIMEOUT >/dev/null &
+thread1 3 $TIMEOUT >/dev/null &
+thread2 $TIMEOUT >/dev/null &
wait
-for _ in {1..10}
-do
- $CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%' SYNC" >/dev/null
-
- # After this moment, the server can still run another query.
- # For example, the 'timeout' command killed all threads of thread1,
- # and the 'timeout' itself has finished, and we have successfully 'wait'-ed for it,
- # but just before that, one of the threads successfully sent a query to the server,
- # but the server didn't start to run this query yet,
- # and even when the KILL QUERY was run, the query from the thread didn't start,
- # but only started after the KILL QUERY has been already processed.
-
- # That's why we have to run this in a loop.
-
- $CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%'" | rg '^0$' && break
-
- sleep 1
-done
+$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%'" | rg '^0$'
\ No newline at end of file
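Moving the deadline into the workers is what makes the KILL QUERY retry loop removable: a $SECONDS-bounded loop finishes its current query and exits on its own, so nothing can slip in after the wait, whereas `timeout` kills the shell mid-flight and can leave one last query racing the cleanup. The pattern in isolation:

    function worker()
    {
        local TIMELIMIT=$((SECONDS + $1))
        # $SECONDS is bash's built-in elapsed-time counter. The body is never
        # interrupted; the loop simply stops being re-entered at the deadline.
        while [ $SECONDS -lt "$TIMELIMIT" ]; do
            : # one unit of work per iteration
        done
    }
    worker 10 &
    wait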
diff --git a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh
index ed66c36b823..229c68bf8ec 100755
--- a/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh
+++ b/tests/queries/0_stateless/02503_cache_on_write_with_small_segment_size.sh
@@ -13,7 +13,21 @@ function random {
${CLICKHOUSE_CLIENT} --multiline --multiquery -q "
drop table if exists ttt;
-create table ttt (id Int32, value String) engine=MergeTree() order by tuple() settings storage_policy='s3_cache_small_segment_size', min_bytes_for_wide_part=0;
+
+CREATE TABLE ttt (id Int32, value String)
+Engine=MergeTree()
+ORDER BY tuple()
+SETTINGS min_bytes_for_wide_part = 0,
+ disk = disk(
+ type = cache,
+ max_size = '128Mi',
+ max_file_segment_size = '10Ki',
+ path = '/var/lib/clickhouse/${CLICKHOUSE_TEST_UNIQUE_NAME}_cache',
+ cache_on_write_operations = 1,
+ enable_filesystem_query_cache_limit = 1,
+ delayed_cleanup_interval_ms = 100,
+ disk = 's3_disk');
+
insert into ttt select number, toString(number) from numbers(100000) settings throw_on_error_from_cache_on_write_operations = 1;
"
diff --git a/tests/queries/0_stateless/02543_alter_rename_modify_stuck.sh b/tests/queries/0_stateless/02543_alter_rename_modify_stuck.sh
index adaf1846552..1f517913076 100755
--- a/tests/queries/0_stateless/02543_alter_rename_modify_stuck.sh
+++ b/tests/queries/0_stateless/02543_alter_rename_modify_stuck.sh
@@ -34,19 +34,10 @@ done
$CLICKHOUSE_CLIENT --query="ALTER TABLE table_to_rename UPDATE v2 = 77 WHERE 1 = 1 SETTINGS mutations_sync = 2" &
-counter=0 retries=60
-
-I=0
-while [[ $counter -lt $retries ]]; do
- I=$((I + 1))
- result=$($CLICKHOUSE_CLIENT --query "SELECT count() from system.mutations where database='${CLICKHOUSE_DATABASE}' and table='table_to_rename'")
- if [[ $result == "2" ]]; then
- break;
- fi
- sleep 0.1
- ((++counter))
-done
+# We cannot wait the same way as for the previous ALTER,
+# because that one is a metadata alter and this mutation will wait for it.
+sleep 3
$CLICKHOUSE_CLIENT --query="SYSTEM START MERGES table_to_rename"
diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference
index 3f34d5e2c79..a97879eaca8 100644
--- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference
+++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.reference
@@ -17,7 +17,7 @@ INSERT and READ INSERT
DROP
CHECK with query_log
QueryFinish INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0; FileOpen 8
-QueryFinish SELECT \'1\', min(t) FROM times; FileOpen 0
+QueryFinish SELECT \'1\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
QueryFinish INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0; FileOpen 8
-QueryFinish SELECT \'2\', min(t) FROM times; FileOpen 0
+QueryFinish SELECT \'2\', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1; FileOpen 0
QueryFinish INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0; FileOpen 8
diff --git a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh
index adc9525ef81..288f1129b53 100755
--- a/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh
+++ b/tests/queries/0_stateless/02675_profile_events_from_query_log_and_client.sh
@@ -44,13 +44,13 @@ INSERT INTO times SELECT now() + INTERVAL 1 day SETTINGS optimize_on_insert = 0;
echo "READ"
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
-SELECT '1', min(t) FROM times;
+SELECT '1', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '
echo "INSERT and READ INSERT"
$CLICKHOUSE_CLIENT --print-profile-events --profile-events-delay-ms=-1 -nq "
INSERT INTO times SELECT now() + INTERVAL 2 day SETTINGS optimize_on_insert = 0;
-SELECT '2', min(t) FROM times;
+SELECT '2', min(t) FROM times SETTINGS optimize_use_implicit_projections = 1;
INSERT INTO times SELECT now() + INTERVAL 3 day SETTINGS optimize_on_insert = 0;
" 2>&1 | grep -o -e '\ \[\ .*\ \]\ FileOpen:\ .*\ '
diff --git a/tests/queries/0_stateless/02680_illegal_type_of_filter_projection.sql b/tests/queries/0_stateless/02680_illegal_type_of_filter_projection.sql
index 3ef3b8a4fe6..d20e4deee27 100644
--- a/tests/queries/0_stateless/02680_illegal_type_of_filter_projection.sql
+++ b/tests/queries/0_stateless/02680_illegal_type_of_filter_projection.sql
@@ -1,3 +1,3 @@
CREATE TABLE test_tuple (`p` DateTime, `i` int, `j` int) ENGINE = MergeTree PARTITION BY (toDate(p), i) ORDER BY j SETTINGS index_granularity = 1;
insert into test_tuple values (1, 1, 1);
-SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }
+SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0, optimize_use_implicit_projections=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }
diff --git a/tests/queries/0_stateless/02725_memory-for-merges.sql b/tests/queries/0_stateless/02725_memory-for-merges.sql
index b6ae7af7f1a..1a8402dff4b 100644
--- a/tests/queries/0_stateless/02725_memory-for-merges.sql
+++ b/tests/queries/0_stateless/02725_memory-for-merges.sql
@@ -1,4 +1,4 @@
--- Tags: no-s3-storage
+-- Tags: no-s3-storage, no-random-merge-tree-settings
-- We allocate a lot of memory for buffers when reading or writing to S3
DROP TABLE IF EXISTS 02725_memory_for_merges SYNC;
@@ -21,7 +21,6 @@ OPTIMIZE TABLE 02725_memory_for_merges FINAL;
SYSTEM FLUSH LOGS;
-WITH (SELECT uuid FROM system.tables WHERE table='02725_memory_for_merges' and database=currentDatabase()) as uuid
-SELECT sum(peak_memory_usage) < 1024 * 1024 * 200 from system.part_log where table_uuid=uuid and event_type='MergeParts';
+SELECT (sum(peak_memory_usage) < 1024 * 1024 * 200 AS x) ? x : sum(peak_memory_usage) from system.part_log where database=currentDatabase() and table='02725_memory_for_merges' and event_type='MergeParts';
DROP TABLE IF EXISTS 02725_memory_for_merges SYNC;
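The rewritten assertion prints 1 when the memory bound holds and the measured value when it does not, so a CI failure is self-describing instead of a bare 0. The ternary trick in isolation, assuming a running server:

    clickhouse-client --query "
        -- prints 1 while the bound holds, otherwise the offending sum
        SELECT (sum(number) < 100 AS x) ? x : sum(number) FROM numbers(10)"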
diff --git a/tests/queries/0_stateless/02732_rename_after_processing.reference b/tests/queries/0_stateless/02732_rename_after_processing.reference
index 39cdb677e09..86f682d682c 100644
--- a/tests/queries/0_stateless/02732_rename_after_processing.reference
+++ b/tests/queries/0_stateless/02732_rename_after_processing.reference
@@ -19,3 +19,6 @@ OK
tmp5.csv
OK
tmp5.csv
+4
+tmp6.csv.processed
+!tmp6.csv
diff --git a/tests/queries/0_stateless/02732_rename_after_processing.sh b/tests/queries/0_stateless/02732_rename_after_processing.sh
index c4f80d3462b..cdbc9892bc7 100755
--- a/tests/queries/0_stateless/02732_rename_after_processing.sh
+++ b/tests/queries/0_stateless/02732_rename_after_processing.sh
@@ -29,6 +29,7 @@ cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_1.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp3_2.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp4.csv
cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp5.csv
+cp ${tmp_dir}/tmp.csv ${tmp_dir}/tmp6.csv
### Checking that renaming works
@@ -115,5 +116,14 @@ if [ -e "${tmp_dir}/tmp5.csv" ]; then
echo "tmp5.csv"
fi
+# check the full-file-name placeholder (%a)
+${CLICKHOUSE_CLIENT} --rename-files-after-processing="%a.processed" -q "SELECT COUNT(*) FROM file('${unique_name}/tmp6.csv')"
+if [ -e "${tmp_dir}/tmp6.csv.processed" ]; then
+ echo "tmp6.csv.processed"
+fi
+if [ ! -e "${tmp_dir}/tmp6.csv" ]; then
+ echo "!tmp6.csv"
+fi
+
# Clean
rm -rd $tmp_dir
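The new tmp6.csv case covers the %a placeholder of --rename-files-after-processing, which expands to the complete source file name (so tmp6.csv becomes tmp6.csv.processed); the earlier cases exercised the other placeholders. Usage in isolation (path illustrative, relative to the server's user_files directory):

    # After the query succeeds, data.csv is renamed in place to
    # data.csv.processed, marking it as consumed.
    clickhouse-client \
        --rename-files-after-processing="%a.processed" \
        --query "SELECT count(*) FROM file('some_dir/data.csv')"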
diff --git a/tests/queries/0_stateless/02752_forbidden_headers.reference b/tests/queries/0_stateless/02752_forbidden_headers.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02752_forbidden_headers.sql b/tests/queries/0_stateless/02752_forbidden_headers.sql
new file mode 100644
index 00000000000..d966fc0a187
--- /dev/null
+++ b/tests/queries/0_stateless/02752_forbidden_headers.sql
@@ -0,0 +1,18 @@
+-- Tags: no-fasttest
+-- Tag no-fasttest: Depends on AWS
+
+SELECT * FROM url('http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM url('http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM url('http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null;
+
+SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null;
+
+SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR }
+
+SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR }
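The matrix above checks, for each of url, urlCluster, s3 and s3Cluster, that headers reserved by the client machinery are rejected with BAD_ARGUMENTS, that the check is case-insensitive, and that arbitrary headers still pass through (the s3 variants then fail later with S3_ERROR because the endpoint is not a real S3 service). Sending a custom header in isolation, assuming a server listening on localhost:8123:

    clickhouse-client --query "
        SELECT * FROM url(
            'http://localhost:8123/ping',
            LineAsString,
            headers('x-custom-trace' = 'demo'))"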
diff --git a/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.reference b/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.reference
new file mode 100644
index 00000000000..e1b420ecf37
--- /dev/null
+++ b/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.reference
@@ -0,0 +1,4 @@
+This is file data1 data1.csv
+This is file data2 data2.csv
+This is file data1 data1.csv
+This is file data2 data2.csv
diff --git a/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.sql b/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.sql
new file mode 100644
index 00000000000..1d125920220
--- /dev/null
+++ b/tests/queries/0_stateless/02771_complex_globs_in_storage_file_path.sql
@@ -0,0 +1,12 @@
+-- Tags: no-replicated-database, no-parallel
+
+SELECT *, _file FROM file('02771/dir{?/subdir?1/da,2/subdir2?/da}ta/non_existing.csv', CSV); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE }
+
+INSERT INTO TABLE FUNCTION file('02771/dir1/subdir11/data1.csv', 'CSV', 's String') SELECT 'This is file data1' SETTINGS engine_file_truncate_on_insert=1;
+INSERT INTO TABLE FUNCTION file('02771/dir2/subdir22/data2.csv', 'CSV', 's String') SELECT 'This is file data2' SETTINGS engine_file_truncate_on_insert=1;
+
+SELECT *, _file FROM file('02771/dir{?/subdir?1/da,2/subdir2?/da}ta1.csv', CSV);
+SELECT *, _file FROM file('02771/dir{?/subdir?1/da,2/subdir2?/da}ta2.csv', CSV);
+
+SELECT *, _file FROM file('02771/dir?/{subdir?1/data1,subdir2?/data2}.csv', CSV) WHERE _file == 'data1.csv';
+SELECT *, _file FROM file('02771/dir?/{subdir?1/data1,subdir2?/data2}.csv', CSV) WHERE _file == 'data2.csv';
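The glob grammar here composes `?` (exactly one character) with `{a,b}` alternation, and an alternative may span directory separators, which is what the dir{?/subdir?1/da,2/subdir2?/da}ta1.csv patterns exercise; the first query additionally checks that a non-matching pattern fails with CANNOT_EXTRACT_TABLE_STRUCTURE rather than crashing. A simpler brace glob in isolation (paths illustrative, relative to the server's user_files directory):

    clickhouse-client --query "
        SELECT *, _file
        FROM file('02771/dir{1/subdir11/data1,2/subdir22/data2}.csv', CSV, 's String')
        ORDER BY _file"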
diff --git a/tests/queries/0_stateless/02772_s3_crash.reference b/tests/queries/0_stateless/02772_s3_crash.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02772_s3_crash.sql b/tests/queries/0_stateless/02772_s3_crash.sql
new file mode 100644
index 00000000000..5cad83def63
--- /dev/null
+++ b/tests/queries/0_stateless/02772_s3_crash.sql
@@ -0,0 +1,5 @@
+-- Tags: no-fasttest
+-- Tag no-fasttest: Depends on AWS
+
+SELECT * FROM s3(headers('random_header' = 'value')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', headers('random_header' = 'value')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
diff --git a/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug.reference b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug.sql b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug.sql
new file mode 100644
index 00000000000..4a9ede36335
--- /dev/null
+++ b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug.sql
@@ -0,0 +1,133 @@
+drop table if exists test;
+drop table if exists test1;
+
+CREATE TABLE test
+(
+ `pt` String,
+ `count_distinct_exposure_uv` AggregateFunction(uniqHLL12, Int64)
+)
+ENGINE = AggregatingMergeTree
+ORDER BY pt;
+
+SELECT *
+FROM
+(
+ SELECT m0.pt AS pt
+ ,m0.`exposure_uv` AS exposure_uv
+ ,round(m2.exposure_uv,4) AS exposure_uv_hb_last_value
+ ,if(m2.exposure_uv IS NULL OR m2.exposure_uv = 0,NULL,round((m0.exposure_uv - m2.exposure_uv) * 1.0 / m2.exposure_uv,4)) AS exposure_uv_hb_diff_percent
+ ,round(m1.exposure_uv,4) AS exposure_uv_tb_last_value
+ ,if(m1.exposure_uv IS NULL OR m1.exposure_uv = 0,NULL,round((m0.exposure_uv - m1.exposure_uv) * 1.0 / m1.exposure_uv,4)) AS exposure_uv_tb_diff_percent
+ FROM
+ (
+ SELECT m0.pt AS pt
+ ,`exposure_uv` AS `exposure_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+ FROM test
+ GROUP BY pt
+ ) m
+ GROUP BY pt
+ ) m0
+ ) m0
+ LEFT JOIN
+ (
+ SELECT m0.pt AS pt
+ ,`exposure_uv` AS `exposure_uv`
+ FROM
+ (
+ SELECT formatDateTime(addYears(parseDateTimeBestEffort(pt),1),'%Y%m%d') AS pt
+ ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+ FROM test
+ GROUP BY pt
+ ) m
+ GROUP BY pt
+ ) m0
+ ) m1
+ ON m0.pt = m1.pt
+ LEFT JOIN
+ (
+ SELECT m0.pt AS pt
+ ,`exposure_uv` AS `exposure_uv`
+ FROM
+ (
+ SELECT formatDateTime(addDays(toDate(parseDateTimeBestEffort(pt)),1),'%Y%m%d') AS pt
+ ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv`
+ FROM test
+ GROUP BY pt
+ ) m
+ GROUP BY pt
+ ) m0
+ ) m2
+ ON m0.pt = m2.pt
+) c0
+ORDER BY pt ASC, exposure_uv DESC
+settings join_use_nulls = 1;
+
+CREATE TABLE test1
+(
+ `pt` String,
+ `exposure_uv` Float64
+)
+ENGINE = Memory;
+
+SELECT *
+FROM
+(
+ SELECT m0.pt
+ ,m0.exposure_uv AS exposure_uv
+ ,round(m2.exposure_uv,4)
+ FROM
+ (
+ SELECT pt
+ ,exposure_uv
+ FROM test1
+ ) m0
+ LEFT JOIN
+ (
+ SELECT pt
+ ,exposure_uv
+ FROM test1
+ ) m1
+ ON m0.pt = m1.pt
+ LEFT JOIN
+ (
+ SELECT pt
+ ,exposure_uv
+ FROM test1
+ ) m2
+ ON m0.pt = m2.pt
+) c0
+ORDER BY exposure_uv
+settings join_use_nulls = 1;
+
+SELECT
+ pt AS pt,
+ exposure_uv AS exposure_uv
+FROM
+(
+ SELECT
+ pt
+ FROM test1
+) AS m0
+FULL OUTER JOIN
+(
+ SELECT
+ pt,
+ exposure_uv
+ FROM test1
+) AS m1 ON m0.pt = m1.pt;
diff --git a/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.reference b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.reference
new file mode 100644
index 00000000000..bcc55e50958
--- /dev/null
+++ b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.reference
@@ -0,0 +1,3 @@
+20230626 0.3156979034107179 \N \N
+20230626 0.2624629016490004 \N \N
+20230626 0.19390556368960468 \N \N
diff --git a/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.sql b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.sql
new file mode 100644
index 00000000000..b0221635fe9
--- /dev/null
+++ b/tests/queries/0_stateless/02789_functions_after_sorting_and_columns_with_same_names_bug_2.sql
@@ -0,0 +1,107 @@
+create table test1 (
+ `pt` String,
+ `brand_name` String,
+ `total_indirect_order_cnt` Float64,
+ `total_indirect_gmv` Float64
+) ENGINE = Memory;
+
+create table test2 (
+ `pt` String,
+ `brand_name` String,
+ `exposure_uv` Float64,
+ `click_uv` Float64
+) ENGINE = Memory;
+
+INSERT INTO test1 (`pt`, `brand_name`, `total_indirect_order_cnt`, `total_indirect_gmv`) VALUES ('20230625', 'LINING', 2232, 1008710), ('20230625', 'adidas', 125, 58820), ('20230625', 'Nike', 1291, 1033020), ('20230626', 'Nike', 1145, 938926), ('20230626', 'LINING', 1904, 853336), ('20230626', 'adidas', 133, 62546), ('20220626', 'LINING', 3747, 1855203), ('20220626', 'Nike', 2295, 1742665), ('20220626', 'adidas', 302, 122388);
+
+INSERT INTO test2 (`pt`, `brand_name`, `exposure_uv`, `click_uv`) VALUES ('20230625', 'Nike', 2012913, 612831), ('20230625', 'adidas', 480277, 96176), ('20230625', 'LINING', 2474234, 627814), ('20230626', 'Nike', 1934666, 610770), ('20230626', 'adidas', 469904, 91117), ('20230626', 'LINING', 2285142, 599765), ('20220626', 'Nike', 2979656, 937166), ('20220626', 'adidas', 704751, 124250), ('20220626', 'LINING', 3163884, 1010221);
+
+SELECT * FROM (
+ SELECT m0.pt AS pt
+ ,m0.`uvctr` AS uvctr
+ ,round(m1.uvctr,4) AS uvctr_hb_last_value
+ ,round(m2.uvctr,4) AS uvctr_tb_last_value
+ FROM
+ (
+ SELECT m0.pt AS pt
+ ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+ ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+ FROM
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,exposure_uv AS `exposure_uv`
+ ,click_uv AS `click_uv`
+ FROM test2
+ WHERE pt = '20230626'
+ ) m0
+ FULL JOIN
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+ ,total_indirect_gmv AS `total_indirect_gmv`
+ FROM test1
+ WHERE pt = '20230626'
+ ) m1
+ ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+ ) m0
+ LEFT JOIN
+ (
+ SELECT m0.pt AS pt
+ ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+ ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+ ,`exposure_uv` AS `exposure_uv`
+ ,`click_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,exposure_uv AS `exposure_uv`
+ ,click_uv AS `click_uv`
+ FROM test2
+ WHERE pt = '20230625'
+ ) m0
+ FULL JOIN
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+ ,total_indirect_gmv AS `total_indirect_gmv`
+ FROM test1
+ WHERE pt = '20230625'
+ ) m1
+ ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+ ) m1
+ ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+ LEFT JOIN
+ (
+ SELECT m0.pt AS pt
+ ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr`
+ ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name
+ ,`exposure_uv` AS `exposure_uv`
+ ,`click_uv`
+ FROM
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,exposure_uv AS `exposure_uv`
+ ,click_uv AS `click_uv`
+ FROM test2
+ WHERE pt = '20220626'
+ ) m0
+ FULL JOIN
+ (
+ SELECT pt AS pt
+ ,brand_name AS `brand_name`
+ ,total_indirect_order_cnt AS `total_indirect_order_cnt`
+ ,total_indirect_gmv AS `total_indirect_gmv`
+ FROM test1
+ WHERE pt = '20220626'
+ ) m1
+ ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt
+ ) m2
+ ON m0.brand_name = m2.brand_name AND m0.pt = m2.pt
+) c0
+ORDER BY pt ASC, uvctr DESC;
+
diff --git a/tests/queries/0_stateless/02801_backup_native_copy.sh b/tests/queries/0_stateless/02801_backup_native_copy.sh
index 015dcb19b82..31a7cc3b410 100755
--- a/tests/queries/0_stateless/02801_backup_native_copy.sh
+++ b/tests/queries/0_stateless/02801_backup_native_copy.sh
@@ -10,7 +10,7 @@ set -e
$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data;
- create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_common_disk';
+ create table data (key Int) engine=MergeTree() order by tuple() settings disk='s3_disk';
insert into data select * from numbers(10);
"
diff --git a/tests/queries/0_stateless/02813_array_concat_agg.reference b/tests/queries/0_stateless/02813_array_concat_agg.reference
new file mode 100644
index 00000000000..7144a499922
--- /dev/null
+++ b/tests/queries/0_stateless/02813_array_concat_agg.reference
@@ -0,0 +1,5 @@
+[1,2,3,4,5,6]
+[1,2,3,4,5,6]
+1 [1,2,3]
+2 [4,5]
+3 [6]
diff --git a/tests/queries/0_stateless/02813_array_concat_agg.sql b/tests/queries/0_stateless/02813_array_concat_agg.sql
new file mode 100644
index 00000000000..94fe133db7d
--- /dev/null
+++ b/tests/queries/0_stateless/02813_array_concat_agg.sql
@@ -0,0 +1,9 @@
+drop table if exists t;
+
+create table t (n UInt32, a Array(Int32)) engine=Memory;
+insert into t values (1, [1,2,3]), (2, [4,5]), (3, [6]);
+
+select array_concat_agg(a) from t;
+select ArrAy_cOncAt_aGg(a) from t;
+select n, array_concat_agg(a) from t group by n order by n;
+drop table t;
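array_concat_agg is a case-insensitive alias for groupArrayArray (groupArray with the -Array combinator), concatenating the array values across the rows of a group; the mixed-case call in the test pins down the alias's case-insensitivity. Equivalence check in isolation:

    clickhouse-client --multiquery <<'SQL'
    CREATE TEMPORARY TABLE t (n UInt32, a Array(Int32));
    INSERT INTO t VALUES (1, [1,2,3]), (2, [4,5]), (3, [6]);
    -- Both columns come out as [1,2,3,4,5,6].
    SELECT array_concat_agg(a), groupArrayArray(a) FROM t;
    SQL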
diff --git a/tests/queries/0_stateless/02813_create_index_noop.reference b/tests/queries/0_stateless/02813_create_index_noop.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02813_create_index_noop.sql b/tests/queries/0_stateless/02813_create_index_noop.sql
new file mode 100644
index 00000000000..3d65f81af9d
--- /dev/null
+++ b/tests/queries/0_stateless/02813_create_index_noop.sql
@@ -0,0 +1,1000 @@
+SET allow_create_index_without_type=0;
+CREATE INDEX idx_tab1_0 on tab1 (col0); -- { serverError INCORRECT_QUERY }
+SET allow_create_index_without_type=1;
+CREATE INDEX idx_tab1_0 on tab1 (col0);
+CREATE INDEX idx_tab1_1 on tab1 (col1);
+CREATE INDEX idx_tab1_3 on tab1 (col3);
+CREATE INDEX idx_tab1_4 on tab1 (col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col1 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col3,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col3,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col0,col4,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3 DESC,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col0,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col1,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col3,col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3 DESC,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col4);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col0 DESC,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col0);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC,col3);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col1);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col3 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_0 ON tab2 (col4,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3 DESC,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col1,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col0,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0 DESC,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col4 DESC,col3,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col3 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col3,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col1,col4,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC,col4 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col3,col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col3);
+CREATE INDEX idx_tab2_1 ON tab2 (col4);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col0);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col1 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col1);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col3 DESC);
+CREATE INDEX idx_tab2_1 ON tab2 (col4,col3 DESC,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col3,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col1 DESC,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col0,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0 DESC,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3 DESC,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col3,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col1,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col1 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col1 DESC,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col3,col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col4);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col0,col3);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col1 DESC,col0);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col1);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col3 DESC);
+CREATE INDEX idx_tab2_2 ON tab2 (col4,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col4 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col0);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC,col1 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col0,col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col0,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC,col0);
+CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col1,col0 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1,col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1,col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col1,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col1,col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0 DESC,col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col1,col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col4,col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col3);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col0 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col3,col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0 DESC,col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col3,col0);
+CREATE INDEX idx_tab2_3 ON tab2 (col4);
+CREATE INDEX idx_tab2_3 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4,col1 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4,col3 DESC);
+CREATE INDEX idx_tab2_3 ON tab2 (col4,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4 DESC,col3);
+CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4);
+CREATE INDEX idx_tab2_4 ON tab2 (col0);
+CREATE INDEX idx_tab2_4 ON tab2 (col0,col3 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col0,col3);
+CREATE INDEX idx_tab2_4 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col1,col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col1,col4);
+CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col4);
+CREATE INDEX idx_tab2_4 ON tab2 (col3);
+CREATE INDEX idx_tab2_4 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_4 ON tab2 (col3,col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col3,col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col3,col4);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3);
+CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4);
+CREATE INDEX idx_tab2_4 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4,col0);
+CREATE INDEX idx_tab2_4 ON tab2 (col4,col1 DESC);
+CREATE INDEX idx_tab2_4 ON tab2 (col4,col1);
+CREATE INDEX idx_tab2_4 ON tab2 (col4,col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col0);
+CREATE INDEX idx_tab2_5 ON tab2 (col0,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col0,col1);
+CREATE INDEX idx_tab2_5 ON tab2 (col0,col4 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col4);
+CREATE INDEX idx_tab2_5 ON tab2 (col1);
+CREATE INDEX idx_tab2_5 ON tab2 (col1,col0);
+CREATE INDEX idx_tab2_5 ON tab2 (col1,col3 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col1,col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col1,col4 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col1);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col4,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col3,col0);
+CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col1);
+CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col3);
+CREATE INDEX idx_tab2_5 ON tab2 (col4);
+CREATE INDEX idx_tab2_5 ON tab2 (col4,col0 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4,col0);
+CREATE INDEX idx_tab2_5 ON tab2 (col4,col0,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4,col1 DESC);
+CREATE INDEX idx_tab2_5 ON tab2 (col4,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4 DESC,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4 DESC,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col0,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col0 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col0,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col1,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col1 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col1,col0 DESC,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col3,col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col0 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC,col0);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col3 DESC);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col3);
+CREATE INDEX idx_tab3_0 ON tab3 (col4,col3,col1,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col3,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col1,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col3);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col4 DESC,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col0,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col0 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col3 DESC,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col3);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col1,col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col3);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col0 DESC,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col0,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col3,col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col1 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col1,col0);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col3 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_1 ON tab3 (col4,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col0,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col1,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col1,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col1,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col1,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col3,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3,col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col3,col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col3,col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0 DESC,col3);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col4);
+CREATE INDEX idx_tab3_2 ON tab3 (col4,col0 DESC,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_2 ON tab3 (col4,col1 DESC);
+CREATE INDEX idx_tab3_2 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_2 ON tab3 (col4,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_3 ON tab3 (col0,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col0,col3);
+CREATE INDEX idx_tab3_3 ON tab3 (col0,col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col3);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col1);
+CREATE INDEX idx_tab3_3 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col1,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1,col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col1,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col1 DESC,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col3);
+CREATE INDEX idx_tab3_3 ON tab3 (col3,col0 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col3,col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col3,col0,col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col3,col4,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col3);
+CREATE INDEX idx_tab3_3 ON tab3 (col4);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col0 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col1 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col3 DESC);
+CREATE INDEX idx_tab3_3 ON tab3 (col4,col3);
+CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col4);
+CREATE INDEX idx_tab3_4 ON tab3 (col0);
+CREATE INDEX idx_tab3_4 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col0,col3);
+CREATE INDEX idx_tab3_4 ON tab3 (col0,col4 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col0,col4);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC,col0);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4);
+CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_4 ON tab3 (col1,col4 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col1,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col4,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col3);
+CREATE INDEX idx_tab3_4 ON tab3 (col3,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col3,col4 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0 DESC,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0);
+CREATE INDEX idx_tab3_4 ON tab3 (col4);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col0,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col1 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col1,col0 DESC);
+CREATE INDEX idx_tab3_4 ON tab3 (col4,col3);
+CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col1);
+CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col3);
+CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col0);
+CREATE INDEX idx_tab3_5 ON tab3 (col0,col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col0,col1);
+CREATE INDEX idx_tab3_5 ON tab3 (col0,col1,col3);
+CREATE INDEX idx_tab3_5 ON tab3 (col0,col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col0,col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col3);
+CREATE INDEX idx_tab3_5 ON tab3 (col1);
+CREATE INDEX idx_tab3_5 ON tab3 (col1,col0);
+CREATE INDEX idx_tab3_5 ON tab3 (col1,col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col1,col4 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col1,col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1,col0);
+CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col3);
+CREATE INDEX idx_tab3_5 ON tab3 (col3,col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col3,col0 DESC,col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col3,col0);
+CREATE INDEX idx_tab3_5 ON tab3 (col3,col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col3,col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4);
+CREATE INDEX idx_tab3_5 ON tab3 (col4,col0 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4,col0);
+CREATE INDEX idx_tab3_5 ON tab3 (col4,col1);
+CREATE INDEX idx_tab3_5 ON tab3 (col4,col3 DESC);
+CREATE INDEX idx_tab3_5 ON tab3 (col4,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col1,col4 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col3,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col3,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col0,col4,col1,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col0 DESC,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col0,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col3 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col1,col4,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col0 DESC,col1,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col0,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col1 DESC,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col3,col4,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0,col1 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3 DESC,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3);
+CREATE INDEX idx_tab4_0 ON tab4 (col4);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col0 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col0);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col0,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col1 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col1);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col3 DESC);
+CREATE INDEX idx_tab4_0 ON tab4 (col4,col3);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col3);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col1,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col3);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col3,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col3);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col3,col4 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col4 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col1,col4,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col3);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col3,col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0 DESC,col3 DESC,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col1);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col3 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col3 DESC,col0,col1 DESC);
+CREATE INDEX idx_tab4_1 ON tab4 (col4,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col0);
+CREATE INDEX idx_tab4_2 ON tab4 (col0,col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col0,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col0,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col0,col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col0,col4,col1 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col0,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col0,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col1,col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col0);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col3,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_2 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col3,col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col1);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3,col0 DESC);
+CREATE INDEX idx_tab4_2 ON tab4 (col4);
+CREATE INDEX idx_tab4_2 ON tab4 (col4,col0,col1);
+CREATE INDEX idx_tab4_2 ON tab4 (col4,col0,col3);
+CREATE INDEX idx_tab4_2 ON tab4 (col4,col1);
+CREATE INDEX idx_tab4_2 ON tab4 (col4,col3 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col3);
+CREATE INDEX idx_tab4_3 ON tab4 (col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col0,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col0,col3 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0,col3,col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col0,col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col3);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col4);
+CREATE INDEX idx_tab4_3 ON tab4 (col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col1,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col1,col4);
+CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col1,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col3);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col0 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col3,col4);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col4);
+CREATE INDEX idx_tab4_3 ON tab4 (col4,col0 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col4,col0);
+CREATE INDEX idx_tab4_3 ON tab4 (col4,col1 DESC);
+CREATE INDEX idx_tab4_3 ON tab4 (col4,col1);
+CREATE INDEX idx_tab4_3 ON tab4 (col4,col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col0,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col0,col1,col3 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col0,col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col0,col4);
+CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col1,col0 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col1,col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col1,col4 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col4 DESC,col0 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col3,col4 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3 DESC,col0 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3,col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col4);
+CREATE INDEX idx_tab4_4 ON tab4 (col4,col0);
+CREATE INDEX idx_tab4_4 ON tab4 (col4,col1);
+CREATE INDEX idx_tab4_4 ON tab4 (col4,col3 DESC);
+CREATE INDEX idx_tab4_4 ON tab4 (col4,col3 DESC,col0);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col1);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col3 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col4 DESC,col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col0);
+CREATE INDEX idx_tab4_5 ON tab4 (col0,col3 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col0,col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col0,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col0);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col3 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1);
+CREATE INDEX idx_tab4_5 ON tab4 (col1,col0 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1,col3 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col1,col4);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1 DESC,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1,col4);
+CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col3,col0);
+CREATE INDEX idx_tab4_5 ON tab4 (col3,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col3,col1);
+CREATE INDEX idx_tab4_5 ON tab4 (col3,col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC,col1 DESC,col0 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC,col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col4);
+CREATE INDEX idx_tab4_5 ON tab4 (col4,col0 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC,col3);
+CREATE INDEX idx_tab4_5 ON tab4 (col4,col3 DESC,col1 DESC);
+CREATE INDEX idx_tab4_5 ON tab4 (col4,col3);
\ No newline at end of file
diff --git a/tests/queries/0_stateless/02814_age_datediff.reference b/tests/queries/0_stateless/02814_age_datediff.reference
new file mode 100644
index 00000000000..cbcb8c8a7b6
--- /dev/null
+++ b/tests/queries/0_stateless/02814_age_datediff.reference
@@ -0,0 +1,130 @@
+-- { echo }
+
+-- DateTime64 vs DateTime64 with fractional part
+SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC'));
+5100200
+SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC'));
+5100200
+SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550299', 6, 'UTC'));
+5100
+SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550298', 6, 'UTC'));
+5099
+SELECT age('second', toDateTime64('2023-03-01 19:18:36.999003', 6, 'UTC'), toDateTime64('2023-03-01 19:18:41.999002', 6, 'UTC'));
+4
+SELECT age('second', toDateTime64('2023-03-01 19:18:36.999', 3, 'UTC'), toDateTime64('2023-03-01 19:18:41.001', 3, 'UTC'));
+4
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.300', 3, 'UTC'));
+5
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.100', 3, 'UTC'));
+4
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 20:35:36.200100', 6, 'UTC'));
+4
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+3
+SELECT age('hour', toDateTime64('2015-01-01 20:31:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+2
+SELECT age('hour', toDateTime64('2015-01-01 20:30:37.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+2
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.300', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+2
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 23:30:36.200100', 6, 'UTC'));
+2
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.200', 3, 'UTC'));
+3
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 19:30:36.200', 3, 'UTC'));
+2
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:28:36.200', 3, 'UTC'));
+2
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:35.200', 3, 'UTC'));
+2
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.199', 3, 'UTC'));
+2
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-04 20:30:36.200100', 6, 'UTC'));
+2
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.200', 3, 'UTC'));
+2
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 19:30:36.200', 3, 'UTC'));
+1
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:29:36.200', 3, 'UTC'));
+1
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:35.200', 3, 'UTC'));
+1
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.100', 3, 'UTC'));
+1
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-15 20:30:36.200100', 6, 'UTC'));
+1
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.200', 3, 'UTC'));
+16
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-01 20:30:36.200', 3, 'UTC'));
+15
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 19:30:36.200', 3, 'UTC'));
+15
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:29:36.200', 3, 'UTC'));
+15
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:35.200', 3, 'UTC'));
+15
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.100', 3, 'UTC'));
+15
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-05-02 20:30:36.200100', 6, 'UTC'));
+15
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.200', 3, 'UTC'));
+5
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-01 20:30:36.200', 3, 'UTC'));
+4
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 19:30:36.200', 3, 'UTC'));
+4
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:29:36.200', 3, 'UTC'));
+4
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:35.200', 3, 'UTC'));
+4
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.100', 3, 'UTC'));
+4
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-04-02 20:30:36.200100', 6, 'UTC'));
+4
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.200', 3, 'UTC'));
+8
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-01-02 20:30:36.200', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-01 20:30:36.200', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 19:30:36.200', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:29:36.200', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:35.200', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.100', 3, 'UTC'));
+7
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2023-02-02 20:30:36.200100', 6, 'UTC'));
+7
+-- DateTime64 vs DateTime64 with negative time
+SELECT age('millisecond', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.350', 3, 'UTC'));
+2349
+SELECT age('second', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.35', 3, 'UTC'));
+2
+SELECT age('second', toDateTime64('1969-12-31 23:59:50.001', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
+5
+SELECT age('second', toDateTime64('1969-12-31 23:59:50.003', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
+4
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02'::Date);
+86400000
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-03'::Date32);
+172800000
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:01:01'::DateTime);
+86461000
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:00:01.299'::DateTime64);
+86401299
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299'::DateTime64, '2021-01-02'::Date);
+701
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299999'::DateTime64(6), '2021-01-02'::Date);
+701
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.2'::DateTime64(1), '2021-01-02'::Date);
+800
+SELECT DATEDIFF(microsecond, '2021-01-01 23:59:59.899999'::DateTime64(6), '2021-01-02 00:01:00.100200300'::DateTime64(9));
+60200201
+SELECT DATEDIFF(microsecond, '1969-12-31 23:59:59.999950'::DateTime64(6, 'UTC'), '1970-01-01 00:00:00.000010'::DateTime64(6, 'UTC'));
+60
+SELECT DATEDIFF(second, '1969-12-31 23:59:59.123'::DateTime64(6, 'UTC'), '1970-01-01 00:00:09.123'::DateTime64(6, 'UTC'));
+10
+SELECT toYYYYMMDDhhmmss(toDateTime64('1969-12-31 23:59:59.900', 3));
+19691231235959
diff --git a/tests/queries/0_stateless/02814_age_datediff.sql b/tests/queries/0_stateless/02814_age_datediff.sql
new file mode 100644
index 00000000000..934a95c035f
--- /dev/null
+++ b/tests/queries/0_stateless/02814_age_datediff.sql
@@ -0,0 +1,80 @@
+-- { echo }
+
+-- DateTime64 vs DateTime64 with fractional part
+SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC'));
+SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC'));
+
+SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550299', 6, 'UTC'));
+SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550298', 6, 'UTC'));
+
+SELECT age('second', toDateTime64('2023-03-01 19:18:36.999003', 6, 'UTC'), toDateTime64('2023-03-01 19:18:41.999002', 6, 'UTC'));
+SELECT age('second', toDateTime64('2023-03-01 19:18:36.999', 3, 'UTC'), toDateTime64('2023-03-01 19:18:41.001', 3, 'UTC'));
+
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.300', 3, 'UTC'));
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.100', 3, 'UTC'));
+SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 20:35:36.200100', 6, 'UTC'));
+
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+SELECT age('hour', toDateTime64('2015-01-01 20:31:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+SELECT age('hour', toDateTime64('2015-01-01 20:30:37.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.300', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC'));
+SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 23:30:36.200100', 6, 'UTC'));
+
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.200', 3, 'UTC'));
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 19:30:36.200', 3, 'UTC'));
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:28:36.200', 3, 'UTC'));
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:35.200', 3, 'UTC'));
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.199', 3, 'UTC'));
+SELECT age('day', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-04 20:30:36.200100', 6, 'UTC'));
+
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.200', 3, 'UTC'));
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 19:30:36.200', 3, 'UTC'));
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:29:36.200', 3, 'UTC'));
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:35.200', 3, 'UTC'));
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.100', 3, 'UTC'));
+SELECT age('week', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-15 20:30:36.200100', 6, 'UTC'));
+
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.200', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-01 20:30:36.200', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 19:30:36.200', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:29:36.200', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:35.200', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.100', 3, 'UTC'));
+SELECT age('month', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-05-02 20:30:36.200100', 6, 'UTC'));
+
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.200', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-01 20:30:36.200', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 19:30:36.200', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:29:36.200', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:35.200', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.100', 3, 'UTC'));
+SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-04-02 20:30:36.200100', 6, 'UTC'));
+
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-01-02 20:30:36.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-01 20:30:36.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 19:30:36.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:29:36.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:35.200', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.100', 3, 'UTC'));
+SELECT age('year', toDateTime64('2015-02-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2023-02-02 20:30:36.200100', 6, 'UTC'));
+
+-- DateTime64 vs DateTime64 with negative time
+SELECT age('millisecond', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.350', 3, 'UTC'));
+SELECT age('second', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.35', 3, 'UTC'));
+SELECT age('second', toDateTime64('1969-12-31 23:59:50.001', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
+SELECT age('second', toDateTime64('1969-12-31 23:59:50.003', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC'));
+
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02'::Date);
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-03'::Date32);
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:01:01'::DateTime);
+SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:00:01.299'::DateTime64);
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299'::DateTime64, '2021-01-02'::Date);
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299999'::DateTime64(6), '2021-01-02'::Date);
+SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.2'::DateTime64(1), '2021-01-02'::Date);
+SELECT DATEDIFF(microsecond, '2021-01-01 23:59:59.899999'::DateTime64(6), '2021-01-02 00:01:00.100200300'::DateTime64(9));
+
+SELECT DATEDIFF(microsecond, '1969-12-31 23:59:59.999950'::DateTime64(6, 'UTC'), '1970-01-01 00:00:00.000010'::DateTime64(6, 'UTC'));
+SELECT DATEDIFF(second, '1969-12-31 23:59:59.123'::DateTime64(6, 'UTC'), '1970-01-01 00:00:09.123'::DateTime64(6, 'UTC'));
+
+SELECT toYYYYMMDDhhmmss(toDateTime64('1969-12-31 23:59:59.900', 3));
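Note on the semantics these cases pin down: age() counts fully completed units between the two timestamps, including the sub-second component for DateTime64, while dateDiff()/DATEDIFF counts crossed unit boundaries. A minimal sketch of the distinction, assuming only the behavior the tests above encode:

    SELECT
        age('month',      toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'),
                          toDateTime64('2015-02-02 20:30:36.100', 3, 'UTC')) AS complete_months,    -- 0: one millisecond short of a full month
        dateDiff('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'),
                          toDateTime64('2015-02-02 20:30:36.100', 3, 'UTC')) AS boundaries_crossed; -- 1: a month boundary was crossed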
diff --git a/tests/queries/0_stateless/02815_first_line.reference b/tests/queries/0_stateless/02815_first_line.reference
new file mode 100644
index 00000000000..cdc86229cc8
--- /dev/null
+++ b/tests/queries/0_stateless/02815_first_line.reference
@@ -0,0 +1,9 @@
+foo
+foo
+foo
+foobarbaz
+== vector
+1 foo
+2 quux
+3 single line
+4 windows
diff --git a/tests/queries/0_stateless/02815_first_line.sql b/tests/queries/0_stateless/02815_first_line.sql
new file mode 100644
index 00000000000..8c0affaebd3
--- /dev/null
+++ b/tests/queries/0_stateless/02815_first_line.sql
@@ -0,0 +1,12 @@
+select firstLine('foo\nbar\nbaz');
+select firstLine('foo\rbar\rbaz');
+select firstLine('foo\r\nbar\r\nbaz');
+select firstLine('foobarbaz');
+
+select '== vector';
+
+drop table if exists 02815_first_line_vector;
+create table 02815_first_line_vector (n Int32, text String) engine = MergeTree order by n;
+
+insert into 02815_first_line_vector values (1, 'foo\nbar\nbaz'), (2, 'quux\n'), (3, 'single line'), (4, 'windows\r\nline breaks');
+select n, firstLine(text) from 02815_first_line_vector order by n;
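firstLine() returns the text before the first line break, treating \n, \r, and \r\n uniformly; a string without a break comes back whole, as the scalar cases assert. For comparison, a regular-expression sketch that matches the same contract on these inputs (an illustration, not the implementation):

    SELECT extract('foo\r\nbar\r\nbaz', '^[^\r\n]*') AS first_line; -- 'foo'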
diff --git a/tests/queries/0_stateless/02815_logical_error_cannot_get_column_name_of_set.reference b/tests/queries/0_stateless/02815_logical_error_cannot_get_column_name_of_set.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02815_logical_error_cannot_get_column_name_of_set.sql b/tests/queries/0_stateless/02815_logical_error_cannot_get_column_name_of_set.sql
new file mode 100644
index 00000000000..aa659165940
--- /dev/null
+++ b/tests/queries/0_stateless/02815_logical_error_cannot_get_column_name_of_set.sql
@@ -0,0 +1,3 @@
+SELECT * FROM numbers(SETTINGS x = 1); -- { serverError BAD_ARGUMENTS }
+SELECT * FROM numbers(numbers(SETTINGS x = 1)); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD }
+SELECT * FROM numbers(numbers(SETTINGS x = 1), SETTINGS x = 1); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD }
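Judging by the file name, these queries previously hit a LOGICAL_ERROR ("Cannot get column name of set"); the test pins user-facing error codes instead. For contrast, the well-formed variant attaches settings to the query, not to the table function's argument list:

    SELECT * FROM numbers(3) SETTINGS max_threads = 1; -- settings belong at the query level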
diff --git a/tests/queries/0_stateless/02816_has_token_empty.reference b/tests/queries/0_stateless/02816_has_token_empty.reference
new file mode 100644
index 00000000000..aa47d0d46d4
--- /dev/null
+++ b/tests/queries/0_stateless/02816_has_token_empty.reference
@@ -0,0 +1,2 @@
+0
+0
diff --git a/tests/queries/0_stateless/02816_has_token_empty.sql b/tests/queries/0_stateless/02816_has_token_empty.sql
new file mode 100644
index 00000000000..e5d6156debd
--- /dev/null
+++ b/tests/queries/0_stateless/02816_has_token_empty.sql
@@ -0,0 +1,7 @@
+SELECT hasTokenCaseInsensitive('K(G', ''); -- { serverError BAD_ARGUMENTS }
+SELECT hasTokenCaseInsensitive('Hello', ''); -- { serverError BAD_ARGUMENTS }
+SELECT hasTokenCaseInsensitive('', ''); -- { serverError BAD_ARGUMENTS }
+SELECT hasTokenCaseInsensitive('', 'Hello');
+SELECT hasToken('Hello', ''); -- { serverError BAD_ARGUMENTS }
+SELECT hasToken('', 'Hello');
+SELECT hasToken('', ''); -- { serverError BAD_ARGUMENTS }
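These cases fix the edge-case contract: an empty needle is rejected with BAD_ARGUMENTS (a token cannot be empty), while an empty haystack simply yields 0, matching the reference file. A sketch of the ordinary whole-token semantics, assuming the documented tokenization by non-alphanumeric separators:

    SELECT
        hasToken('Hello World', 'Hello') AS whole_token,   -- 1
        hasToken('Hello World', 'Hell')  AS partial_token; -- 0: 'Hell' is not a complete token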
diff --git a/tests/queries/0_stateless/02817_group_array_moving_zero_window_size.reference b/tests/queries/0_stateless/02817_group_array_moving_zero_window_size.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02817_group_array_moving_zero_window_size.sql b/tests/queries/0_stateless/02817_group_array_moving_zero_window_size.sql
new file mode 100644
index 00000000000..fcbcaf1245b
--- /dev/null
+++ b/tests/queries/0_stateless/02817_group_array_moving_zero_window_size.sql
@@ -0,0 +1,2 @@
+SELECT groupArrayMovingAvg ( toInt64 ( 0 ) ) ( toDecimal32 ( 1 , 1 ) ); -- { serverError BAD_ARGUMENTS }
+
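The case pins a zero moving-window size as BAD_ARGUMENTS rather than an abort. For contrast, a valid call with a positive window (exact averages follow the documented fixed-window semantics):

    SELECT groupArrayMovingAvg(3)(number) FROM numbers(5); -- window of 3 over values 0..4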
diff --git a/tests/queries/1_stateful/00172_early_constant_folding.sql b/tests/queries/1_stateful/00172_early_constant_folding.sql
index 1ed7b8719b4..19f99f107ac 100644
--- a/tests/queries/1_stateful/00172_early_constant_folding.sql
+++ b/tests/queries/1_stateful/00172_early_constant_folding.sql
@@ -1,4 +1,5 @@
-- Tags: no-parallel-replicas
set max_threads=10;
+set optimize_use_implicit_projections=1;
EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1;
diff --git a/tests/sqllogic/connection.py b/tests/sqllogic/connection.py
index ca03839fc35..0033c29c41c 100644
--- a/tests/sqllogic/connection.py
+++ b/tests/sqllogic/connection.py
@@ -62,7 +62,7 @@ def default_clickhouse_odbc_conn_str():
return str(
OdbcConnectingArgs.create_from_kw(
dsn="ClickHouse DSN (ANSI)",
- Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree&union_default_mode=DISTINCT&group_by_use_nulls=1&join_use_nulls=1",
+ Url="http://localhost:8123/query?default_format=ODBCDriver2&default_table_engine=MergeTree&union_default_mode=DISTINCT&group_by_use_nulls=1&join_use_nulls=1&allow_create_index_without_type=1",
)
)
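The setting appended to the DSN, allow_create_index_without_type, lets the sqllogic suite accept ANSI-style index DDL that omits ClickHouse's TYPE clause; such statements are then parsed and effectively ignored. A hypothetical statement this unblocks, with illustrative names:

    CREATE INDEX idx_n ON t (n); -- hypothetical table t; accepted (and ignored) only when allow_create_index_without_type = 1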
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 1acc74c5d4a..a7f717f2f48 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -993,6 +993,7 @@ addressToLine
addressToLineWithInlines
addressToSymbol
adviced
+agg
aggregatefunction
aggregatingmergetree
aggregatio
@@ -1429,6 +1430,7 @@ filesystemFree
filesystems
finalizeAggregation
fips
+firstLine
firstSignificantSubdomain
firstSignificantSubdomainCustom
fixedstring
diff --git a/utils/self-extracting-executable/decompressor.cpp b/utils/self-extracting-executable/decompressor.cpp
index 4a4985120fd..91f4bea5a5b 100644
--- a/utils/self-extracting-executable/decompressor.cpp
+++ b/utils/self-extracting-executable/decompressor.cpp
@@ -362,11 +362,12 @@ int decompressFiles(int input_fd, char * path, char * name, bool & have_compress
#else
- int read_exe_path(char *exe, size_t/* buf_sz*/)
+ int read_exe_path(char *exe, size_t buf_sz)
{
- if (realpath("/proc/self/exe", exe) == nullptr)
- return 1;
- return 0;
+ ssize_t n = readlink("/proc/self/exe", exe, buf_sz - 1);
+ if (n > 0)
+ exe[n] = '\0';
+ return n > 0 && n < static_cast<ssize_t>(buf_sz);
}
#endif
@@ -430,58 +431,55 @@ int main(int/* argc*/, char* argv[])
return 1;
}
- int lock = -1;
- /// Protection from double decompression
#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
/// get inode of this executable
uint64_t inode = getInode(self);
- /// In some cases /proc/self/maps may not contain the inode for the
- /// /proc/self/exe, one of such examples are using qemu-*-static, in this
- /// case maps will be proxied through the qemu, and it will remove
- /// information about itself from it.
- if (inode != 0)
+ if (inode == 0)
{
- std::stringstream lock_path; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
- lock_path << "/tmp/" << name << ".decompression." << inode << ".lock";
- lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
- if (lock < 0)
+ std::cerr << "Unable to obtain inode for exe '" << self << "'." << std::endl;
+ return 1;
+ }
+
+ std::stringstream lock_path; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
+ lock_path << "/tmp/" << name << ".decompression." << inode << ".lock";
+ int lock = open(lock_path.str().c_str(), O_CREAT | O_RDWR, 0666);
+ if (lock < 0)
+ {
+ perror("lock open");
+ return 1;
+ }
+
+ /// lock file should be closed on exec call
+ fcntl(lock, F_SETFD, FD_CLOEXEC);
+
+ if (lockf(lock, F_LOCK, 0))
+ {
+ perror("lockf");
+ return 1;
+ }
+
+ /// Inconsistency in WSL1 Ubuntu: the inode reported in /proc/self/maps is a 64-bit
+ /// to 32-bit truncation of input_info.st_ino.
+ if (input_info.st_ino & 0xFFFFFFFF00000000 && !(inode & 0xFFFFFFFF00000000))
+ input_info.st_ino &= 0x00000000FFFFFFFF;
+
+ /// If decompression was performed by another process since this copy started,
+ /// the file referred to by the path "self" already points to a different inode.
+ if (input_info.st_ino != inode)
+ {
+ struct stat lock_info;
+ if (0 != fstat(lock, &lock_info))
{
- perror("lock open");
+ perror("fstat lock");
return 1;
}
- /// lock file should be closed on exec call
- fcntl(lock, F_SETFD, FD_CLOEXEC);
+ /// A lock file size of 1 indicates that another decompressor has found an active executable.
+ if (lock_info.st_size == 1)
+ execv(self, argv);
- if (lockf(lock, F_LOCK, 0))
- {
- perror("lockf");
- return 1;
- }
-
- /// inconsistency in WSL1 Ubuntu - inode reported in /proc/self/maps is a 64bit to
- /// 32bit conversion of input_info.st_ino
- if (input_info.st_ino & 0xFFFFFFFF00000000 && !(inode & 0xFFFFFFFF00000000))
- input_info.st_ino &= 0x00000000FFFFFFFF;
-
- /// if decompression was performed by another process since this copy was started
- /// then file referred by path "self" is already pointing to different inode
- if (input_info.st_ino != inode)
- {
- struct stat lock_info;
- if (0 != fstat(lock, &lock_info))
- {
- perror("fstat lock");
- return 1;
- }
-
- /// size 1 of lock file indicates that another decompressor has found active executable
- if (lock_info.st_size == 1)
- execv(self, argv);
-
- printf("No target executable - decompression only was performed.\n");
- return 0;
- }
+ printf("No target executable - decompression only was performed.\n");
+ return 0;
}
#endif
@@ -549,19 +547,21 @@ int main(int/* argc*/, char* argv[])
if (has_exec)
{
+#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
/// write one byte to the lock in case other copies of compressed are running to indicate that
/// execution should be performed
- if (lock >= 0)
- write(lock, "1", 1);
+ write(lock, "1", 1);
+#endif
execv(self, argv);
/// This part of code will be reached only if error happened
perror("execv");
return 1;
}
+#if !defined(OS_DARWIN) && !defined(OS_FREEBSD)
+ /// Since inodes can be reused, this is a precaution in case the lock file already exists and has a size of 1.
- if (lock >= 0)
- ftruncate(lock, 0);
+ ftruncate(lock, 0);
+#endif
printf("No target executable - decompression only was performed.\n");
}