diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake
index 52f301ab8ad..dd0c4b080fe 100644
--- a/cmake/fuzzer.cmake
+++ b/cmake/fuzzer.cmake
@@ -4,8 +4,8 @@ if (FUZZER)
# NOTE: Eldar Zaitov decided to name it "libfuzzer" instead of "fuzzer" to keep in mind another possible fuzzer backends.
# NOTE: no-link means that all the targets are built with instrumentation for fuzzer, but only some of them
# (tests) have entry point for fuzzer and it's not checked.
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link")
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link")
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link -DFUZZER=1")
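+    # NOTE: -DFUZZER=1 lets the sources detect fuzzing builds at compile time,
+    # e.g. via `#if defined(FUZZER)` guards around fuzzer-only entry points (illustration).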
# NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable
if (NOT LIB_FUZZING_ENGINE)
diff --git a/contrib/google-protobuf-cmake/CMakeLists.txt b/contrib/google-protobuf-cmake/CMakeLists.txt
index 727121e60b5..dda6dfe85e4 100644
--- a/contrib/google-protobuf-cmake/CMakeLists.txt
+++ b/contrib/google-protobuf-cmake/CMakeLists.txt
@@ -385,9 +385,25 @@ endif ()
include("${ClickHouse_SOURCE_DIR}/contrib/google-protobuf-cmake/protobuf_generate.cmake")
+# These files need to be installed so that users can use the well-known protobuf types
+set(google_proto_files
+ ${protobuf_source_dir}/src/google/protobuf/any.proto
+ ${protobuf_source_dir}/src/google/protobuf/api.proto
+ ${protobuf_source_dir}/src/google/protobuf/descriptor.proto
+ ${protobuf_source_dir}/src/google/protobuf/duration.proto
+ ${protobuf_source_dir}/src/google/protobuf/empty.proto
+ ${protobuf_source_dir}/src/google/protobuf/field_mask.proto
+ ${protobuf_source_dir}/src/google/protobuf/source_context.proto
+ ${protobuf_source_dir}/src/google/protobuf/struct.proto
+ ${protobuf_source_dir}/src/google/protobuf/timestamp.proto
+ ${protobuf_source_dir}/src/google/protobuf/type.proto
+ ${protobuf_source_dir}/src/google/protobuf/wrappers.proto
+)
+
add_library(_protobuf INTERFACE)
target_link_libraries(_protobuf INTERFACE _libprotobuf)
target_include_directories(_protobuf INTERFACE "${Protobuf_INCLUDE_DIR}")
+set_target_properties(_protobuf PROPERTIES google_proto_files "${google_proto_files}")
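+# Consumers can read this list back via get_target_property, e.g. (illustration):
+#   get_target_property(proto_files _protobuf google_proto_files)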
add_library(ch_contrib::protobuf ALIAS _protobuf)
add_library(_protoc INTERFACE)
diff --git a/contrib/libunwind b/contrib/libunwind
index 30cc1d3fd36..40d8eadf96b 160000
--- a/contrib/libunwind
+++ b/contrib/libunwind
@@ -1 +1 @@
-Subproject commit 30cc1d3fd3655a5cfa0ab112fe320fb9fc0a8344
+Subproject commit 40d8eadf96b127d9b22d53ce7a4fc52aaedea965
diff --git a/contrib/libunwind-cmake/CMakeLists.txt b/contrib/libunwind-cmake/CMakeLists.txt
index 0d872bae5d1..8f3cd8bd07b 100644
--- a/contrib/libunwind-cmake/CMakeLists.txt
+++ b/contrib/libunwind-cmake/CMakeLists.txt
@@ -20,15 +20,7 @@ set(LIBUNWIND_ASM_SOURCES
"${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersRestore.S"
"${LIBUNWIND_SOURCE_DIR}/src/UnwindRegistersSave.S")
-# CMake doesn't pass the correct architecture for Apple prior to CMake 3.19 [1]
-# Workaround these two issues by compiling as C.
-#
-# [1]: https://gitlab.kitware.com/cmake/cmake/-/issues/20771
-if (APPLE AND CMAKE_VERSION VERSION_LESS 3.19)
- set_source_files_properties(${LIBUNWIND_ASM_SOURCES} PROPERTIES LANGUAGE C)
-else()
- enable_language(ASM)
-endif()
+enable_language(ASM)
set(LIBUNWIND_SOURCES
${LIBUNWIND_CXX_SOURCES}
diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile
index 63de9f6c462..b174dfde675 100644
--- a/docker/keeper/Dockerfile
+++ b/docker/keeper/Dockerfile
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.10.4.25"
+ARG VERSION="23.10.5.20"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.
diff --git a/docker/server/.dockerignore b/docker/server/.dockerignore
deleted file mode 100644
index d360712c18f..00000000000
--- a/docker/server/.dockerignore
+++ /dev/null
@@ -1,8 +0,0 @@
-# post / preinstall scripts (not needed, we do it in Dockerfile)
-alpine-root/install/*
-
-# docs (looks useless)
-alpine-root/usr/share/doc/*
-
-# packages, etc. (used by alpine-build.sh)
-tgz-packages/*
diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine
index d26bb344fef..d4498abda6a 100644
--- a/docker/server/Dockerfile.alpine
+++ b/docker/server/Dockerfile.alpine
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.10.4.25"
+ARG VERSION="23.10.5.20"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu
index 53a36818121..08e95cd535b 100644
--- a/docker/server/Dockerfile.ubuntu
+++ b/docker/server/Dockerfile.ubuntu
@@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.10.4.25"
+ARG VERSION="23.10.5.20"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image
diff --git a/docker/test/base/setup_export_logs.sh b/docker/test/base/setup_export_logs.sh
index ec24b237752..6e3721956c0 100755
--- a/docker/test/base/setup_export_logs.sh
+++ b/docker/test/base/setup_export_logs.sh
@@ -126,6 +126,9 @@ function setup_logs_replication
# It's doesn't make sense to try creating tables if SYNC fails
echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0
+ debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'")
+ echo "Build is debug or sanitizer: $debug_or_sanitizer_build"
+
# For each system log table:
echo 'Create %_log tables'
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
@@ -133,7 +136,14 @@ function setup_logs_replication
if [[ "$table" = "trace_log" ]]
then
EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_TRACE_LOG}"
- EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}"
+ # Do not try to resolve stack traces in case of debug/sanitizers
+ # build, since it is too slow (flushing of trace_log can take ~1min
+ # with such MV attached)
+ if [[ "$debug_or_sanitizer_build" = 1 ]]; then
+ EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
+ else
+ EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}"
+ fi
else
EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}"
EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
@@ -182,3 +192,13 @@ function setup_logs_replication
" || continue
done
)
+
+function stop_logs_replication
+{
+ echo "Detach all logs replication"
+ clickhouse-client --query "select database||'.'||table from system.tables where database = 'system' and (table like '%_sender' or table like '%_watcher')" | {
+ tee /dev/stderr
+ } | {
+ xargs -n1 -r -i clickhouse-client --query "drop table {}"
+ }
+}
diff --git a/docker/test/fuzzer/generate-test-j2.py b/docker/test/fuzzer/generate-test-j2.py
index 11525163ed8..6fd37d6bd02 100755
--- a/docker/test/fuzzer/generate-test-j2.py
+++ b/docker/test/fuzzer/generate-test-j2.py
@@ -3,6 +3,7 @@
from argparse import ArgumentParser
import os
import jinja2
+import itertools
def removesuffix(text, suffix):
@@ -47,6 +48,7 @@ def main(args):
loader=jinja2.FileSystemLoader(suite_dir),
keep_trailing_newline=True,
)
+ j2env.globals.update(product=itertools.product)
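+    # `product` (itertools.product) is now available inside templates, e.g. (illustration):
+    #   {% for engine, policy in product(['MergeTree', 'Memory'], ['s3', 'default']) %} ... {% endfor %}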
test_names = os.listdir(suite_dir)
for test_name in test_names:
diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh
index af1ce0c4dd4..8aeb06ec27b 100755
--- a/docker/test/fuzzer/run-fuzzer.sh
+++ b/docker/test/fuzzer/run-fuzzer.sh
@@ -212,11 +212,11 @@ quit
gdb -batch -command script.gdb -p $server_pid &
sleep 5
- # gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
+ # gdb will send SIGSTOP, spend some time loading debug info, and then send SIGCONT, wait for it (up to send_timeout, 300s)
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
# Check connectivity after we attach gdb, because it might cause the server
- # to freeze and the fuzzer will fail. In debug build it can take a lot of time.
+ # to freeze, and the fuzzer will fail. In debug build, it can take a lot of time.
for _ in {1..180}
do
if clickhouse-client --query "select 1"
@@ -226,14 +226,15 @@ quit
sleep 1
done
kill -0 $server_pid # This checks that it is our server that is started and not some other one
- echo 'Server started and responded'
+ echo 'Server started and responded.'
setup_logs_replication
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
- # SC2046: Quote this to prevent word splitting. Actually I need word splitting.
+ # SC2046: Quote this to prevent word splitting. Actually, I need word splitting.
# shellcheck disable=SC2012,SC2046
timeout -s TERM --preserve-status 30m clickhouse-client \
+ --max_memory_usage_in_client=1000000000 \
--receive_timeout=10 \
--receive_data_timeout_ms=10000 \
--stacktrace \
@@ -253,10 +254,10 @@ quit
wait "$fuzzer_pid" || fuzzer_exit_code=$?
echo "Fuzzer exit code is $fuzzer_exit_code"
- # If the server dies, most often the fuzzer returns code 210: connetion
+    # If the server dies, most often the fuzzer returns Code 210: Connection
# refused, and sometimes also code 32: attempt to read after eof. For
- # simplicity, check again whether the server is accepting connections, using
- # clickhouse-client. We don't check for existence of server process, because
+ # simplicity, check again whether the server is accepting connections using
+ # clickhouse-client. We don't check for the existence of the server process, because
# the process is still present while the server is terminating and not
# accepting the connections anymore.
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index 9951d79d6ac..07b40ea3b3d 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -217,6 +217,9 @@ ls -la /
clickhouse-client -q "system flush logs" ||:
+# stop logs replication to make it possible to dump logs tables via clickhouse-local
+stop_logs_replication
+
# Stop server so we can safely read data with clickhouse-local.
# Why do we read data with clickhouse-local?
# Because it's the simplest way to read it when server has crashed.
diff --git a/docs/changelogs/v23.10.5.20-stable.md b/docs/changelogs/v23.10.5.20-stable.md
new file mode 100644
index 00000000000..03e8c47481b
--- /dev/null
+++ b/docs/changelogs/v23.10.5.20-stable.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.10.5.20-stable (e84001e5c61) FIXME as compared to v23.10.4.25-stable (330fd687d41)
+
+#### Improvement
+* Backported in [#56924](https://github.com/ClickHouse/ClickHouse/issues/56924): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#57023](https://github.com/ClickHouse/ClickHouse/issues/57023): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.3.18.15-lts.md b/docs/changelogs/v23.3.18.15-lts.md
new file mode 100644
index 00000000000..3bf993a0960
--- /dev/null
+++ b/docs/changelogs/v23.3.18.15-lts.md
@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.3.18.15-lts (7228475d77a) FIXME as compared to v23.3.17.13-lts (e867d59020f)
+
+#### Improvement
+* Backported in [#56928](https://github.com/ClickHouse/ClickHouse/issues/56928): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#57019](https://github.com/ClickHouse/ClickHouse/issues/57019): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.8.8.20-lts.md b/docs/changelogs/v23.8.8.20-lts.md
new file mode 100644
index 00000000000..345cfcccf17
--- /dev/null
+++ b/docs/changelogs/v23.8.8.20-lts.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.8.8.20-lts (5e012a03bf2) FIXME as compared to v23.8.7.24-lts (812b95e14ba)
+
+#### Improvement
+* Backported in [#56509](https://github.com/ClickHouse/ClickHouse/issues/56509): Allow backup of materialized view with dropped inner table instead of failing the backup. [#56387](https://github.com/ClickHouse/ClickHouse/pull/56387) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#56929](https://github.com/ClickHouse/ClickHouse/issues/56929): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#57020](https://github.com/ClickHouse/ClickHouse/issues/57020): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/changelogs/v23.9.6.20-stable.md b/docs/changelogs/v23.9.6.20-stable.md
new file mode 100644
index 00000000000..b4aed625fea
--- /dev/null
+++ b/docs/changelogs/v23.9.6.20-stable.md
@@ -0,0 +1,28 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.9.6.20-stable (cf7e84bb8cf) FIXME as compared to v23.9.5.29-stable (f8554c1a1ff)
+
+#### Improvement
+* Backported in [#56930](https://github.com/ClickHouse/ClickHouse/issues/56930): There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#57022](https://github.com/ClickHouse/ClickHouse/issues/57022): There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
diff --git a/docs/en/engines/table-engines/integrations/azureBlobStorage.md b/docs/en/engines/table-engines/integrations/azureBlobStorage.md
index 3df08ee2ffb..c6525121667 100644
--- a/docs/en/engines/table-engines/integrations/azureBlobStorage.md
+++ b/docs/en/engines/table-engines/integrations/azureBlobStorage.md
@@ -47,6 +47,12 @@ SELECT * FROM test_table;
└──────┴───────┘
```
+## Virtual columns {#virtual-columns}
+
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
+
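+A minimal illustration, reusing `test_table` from the example above:
+
+```sql
+SELECT _path, _file, _size FROM test_table LIMIT 1;
+```
+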
## See also
[Azure Blob Storage Table Function](/docs/en/sql-reference/table-functions/azureBlobStorage)
diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md
index 23ab89e1983..9af857b0835 100644
--- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md
+++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md
@@ -85,6 +85,10 @@ You can also change any [rocksdb options](https://github.com/facebook/rocksdb/wi
```
+By default, the trivial approximate count optimization is turned off, which might affect the performance of `count()`
+queries. To enable this optimization, set `optimize_trivial_approximate_count_query = 1`. This setting also affects
+`system.tables` for the EmbeddedRocksDB engine; enable it to see approximate values for `total_rows` and `total_bytes`.
+
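+A minimal sketch (the table name `rocksdb_table` is hypothetical):
+
+```sql
+SELECT count() FROM rocksdb_table SETTINGS optimize_trivial_approximate_count_query = 1;
+```
+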
## Supported operations {#supported-operations}
### Inserts
diff --git a/docs/en/engines/table-engines/integrations/hdfs.md b/docs/en/engines/table-engines/integrations/hdfs.md
index c677123a8d0..19221c256f9 100644
--- a/docs/en/engines/table-engines/integrations/hdfs.md
+++ b/docs/en/engines/table-engines/integrations/hdfs.md
@@ -230,8 +230,9 @@ libhdfs3 support HDFS namenode HA.
## Virtual Columns {#virtual-columns}
-- `_path` — Path to the file.
-- `_file` — Name of the file.
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
## Storage Settings {#storage-settings}
diff --git a/docs/en/engines/table-engines/integrations/s3.md b/docs/en/engines/table-engines/integrations/s3.md
index 2967a15494c..3144bdd32fa 100644
--- a/docs/en/engines/table-engines/integrations/s3.md
+++ b/docs/en/engines/table-engines/integrations/s3.md
@@ -142,8 +142,9 @@ Code: 48. DB::Exception: Received from localhost:9000. DB::Exception: Reading fr
## Virtual columns {#virtual-columns}
-- `_path` — Path to the file.
-- `_file` — Name of the file.
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
For more information about virtual columns see [here](../../../engines/table-engines/index.md#table_engines-virtual_columns).
diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md
index f0bc45b9f53..9cbb48ef847 100644
--- a/docs/en/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md
@@ -504,8 +504,8 @@ Indexes of type `set` can be utilized by all functions. The other index types ar
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | inverted |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|----------|
-| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, <>)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](/docs/en/sql-reference/functions/string-search-functions.md/#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
@@ -513,10 +513,10 @@ Indexes of type `set` can be utilized by all functions. The other index types ar
| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
| [in](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](/docs/en/sql-reference/functions/in-functions#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions#function-has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
diff --git a/docs/en/engines/table-engines/special/file.md b/docs/en/engines/table-engines/special/file.md
index 27945b30c03..6e3897398a5 100644
--- a/docs/en/engines/table-engines/special/file.md
+++ b/docs/en/engines/table-engines/special/file.md
@@ -87,12 +87,18 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
- Indices
- Replication
-## PARTITION BY
+## PARTITION BY {#partition-by}
`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
+
## Settings {#settings}
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
diff --git a/docs/en/engines/table-engines/special/url.md b/docs/en/engines/table-engines/special/url.md
index 5a5e1564180..f6183a779ae 100644
--- a/docs/en/engines/table-engines/special/url.md
+++ b/docs/en/engines/table-engines/special/url.md
@@ -103,6 +103,12 @@ SELECT * FROM url_engine_table
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the `URL`. Type: `LowCardinality(String)`.
+- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
+- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
+
## Storage Settings {#storage-settings}
- [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 62256354467..2b73c4ec624 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -74,7 +74,7 @@ The maximum number of threads that will be used for fetching data parts from ano
Type: UInt64
-Default: 8
+Default: 16
## background_merges_mutations_concurrency_ratio
@@ -136,7 +136,7 @@ The maximum number of threads that will be used for constantly executing some li
Type: UInt64
-Default: 128
+Default: 512
## backup_threads
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index edc1c9bdfd7..663572d91c8 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -4805,6 +4805,243 @@ a Tuple(
If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis.
+## query_plan_enable_optimizations {#query_plan_enable_optimizations}
+
+Toggles query optimization at the query plan level.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable all optimizations at the query plan level
+- 1 - Enable optimizations at the query plan level (but individual optimizations may still be disabled via their individual settings)
+
+Default value: `1`.
+
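+To see the effect, compare plans with and without optimizations, e.g. (a minimal illustration):
+
+```sql
+EXPLAIN PLAN SELECT number FROM numbers(10) WHERE number > 5
+SETTINGS query_plan_enable_optimizations = 0;
+```
+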
+## query_plan_max_optimizations_to_apply
+
+Limits the total number of optimizations applied to the query plan, see setting [query_plan_enable_optimizations](#query_plan_enable_optimizations).
+Useful to avoid long optimization times for complex queries.
+If the actual number of optimizations exceeds this setting, an exception is thrown.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Type: [UInt64](../../sql-reference/data-types/int-uint.md).
+
+Default value: `10000`.
+
+## query_plan_lift_up_array_join
+
+Toggles a query-plan-level optimization which moves ARRAY JOINs up in the execution plan.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_push_down_limit
+
+Toggles a query-plan-level optimization which moves LIMITs down in the execution plan.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_split_filter
+
+Toggles a query-plan-level optimization which splits filters into expressions.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_merge_expressions
+
+Toggles a query-plan-level optimization which merges consecutive filters.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_filter_push_down
+
+Toggles a query-plan-level optimization which moves filters down in the execution plan.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_execute_functions_after_sorting
+
+Toggles a query-plan-level optimization which moves expressions after sorting steps.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_reuse_storage_ordering_for_window_functions
+
+Toggles a query-plan-level optimization which uses storage sorting when sorting for window functions.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_lift_up_union
+
+Toggles a query-plan-level optimization which moves larger subtrees of the query plan into union to enable further optimizations.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_distinct_in_order
+
+Toggles the query-plan-level distinct-in-order optimization.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_read_in_order
+
+Toggles the query-plan-level read-in-order optimization.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_aggregation_in_order
+
+Toggles the query-plan-level aggregation-in-order optimization.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `0`.
+
+## query_plan_remove_redundant_sorting
+
+Toggles a query-plan-level optimization which removes redundant sorting steps, e.g. in subqueries.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
+## query_plan_remove_redundant_distinct
+
+Toggles a query-plan-level optimization which removes redundant DISTINCT steps.
+Only takes effect if setting [query_plan_enable_optimizations](#query_plan_enable_optimizations) is 1.
+
+:::note
+This is an expert-level setting which should only be used for debugging by developers. The setting may change in future in backward-incompatible ways or be removed.
+:::
+
+Possible values:
+
+- 0 - Disable
+- 1 - Enable
+
+Default value: `1`.
+
## dictionary_use_async_executor {#dictionary_use_async_executor}
Execute a pipeline for reading dictionary source in several threads. It's supported only by dictionaries with local CLICKHOUSE source.
diff --git a/docs/en/operations/system-tables/databases.md b/docs/en/operations/system-tables/databases.md
index f3d3d388c36..e3b0ded96e8 100644
--- a/docs/en/operations/system-tables/databases.md
+++ b/docs/en/operations/system-tables/databases.md
@@ -14,6 +14,7 @@ Columns:
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Database UUID.
- `comment` ([String](../../sql-reference/data-types/enum.md)) — Database comment.
- `engine_full` ([String](../../sql-reference/data-types/enum.md)) — Parameters of the database engine.
+- `database` ([String](../../sql-reference/data-types/string.md)) — Alias for `name`.
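+
+For example, both columns return the same value (a minimal illustration):
+
+```sql
+SELECT name, database FROM system.databases LIMIT 1;
+```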
The `name` column from this system table is used for implementing the `SHOW DATABASES` query.
diff --git a/docs/en/sql-reference/data-types/lowcardinality.md b/docs/en/sql-reference/data-types/lowcardinality.md
index 7810f4c5324..db10103282d 100644
--- a/docs/en/sql-reference/data-types/lowcardinality.md
+++ b/docs/en/sql-reference/data-types/lowcardinality.md
@@ -56,7 +56,7 @@ Functions:
## Related content
-- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/)
+- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://altinity.com/blog/2020-5-20-reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer)
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
diff --git a/docs/en/sql-reference/functions/comparison-functions.md b/docs/en/sql-reference/functions/comparison-functions.md
index 297d84eb8a5..abe923adeb3 100644
--- a/docs/en/sql-reference/functions/comparison-functions.md
+++ b/docs/en/sql-reference/functions/comparison-functions.md
@@ -20,7 +20,7 @@ Strings are compared byte-by-byte. Note that this may lead to unexpected results
A string S1 which has another string S2 as prefix is considered longer than S2.
-## equals, `=`, `==` operators
+## equals, `=`, `==` operators {#equals}
**Syntax**
@@ -32,7 +32,7 @@ Alias:
- `a = b` (operator)
- `a == b` (operator)
-## notEquals, `!=`, `<>` operators
+## notEquals, `!=`, `<>` operators {#notequals}
**Syntax**
@@ -44,7 +44,7 @@ Alias:
- `a != b` (operator)
- `a <> b` (operator)
-## less, `<` operator
+## less, `<` operator {#less}
**Syntax**
@@ -55,7 +55,7 @@ less(a, b)
Alias:
- `a < b` (operator)
-## greater, `>` operator
+## greater, `>` operator {#greater}
**Syntax**
@@ -66,7 +66,7 @@ greater(a, b)
Alias:
- `a > b` (operator)
-## lessOrEquals, `<=` operator
+## lessOrEquals, `<=` operator {#lessorequals}
**Syntax**
@@ -77,7 +77,7 @@ lessOrEquals(a, b)
Alias:
- `a <= b` (operator)
-## greaterOrEquals, `>=` operator
+## greaterOrEquals, `>=` operator {#greaterorequals}
**Syntax**
diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md
index 43f7c9cc61e..565486275e6 100644
--- a/docs/en/sql-reference/functions/date-time-functions.md
+++ b/docs/en/sql-reference/functions/date-time-functions.md
@@ -2533,13 +2533,14 @@ formatDateTime(Time, Format[, Timezone])
Returns time and date values according to the determined format.
**Replacement fields**
+
Using replacement fields, you can define a pattern for the resulting string. “Example” column shows formatting result for `2018-01-02 22:33:44`.
-| Placeholder | Description | Example |
+| Placeholder | Description | Example |
|----------|---------------------------------------------------------|------------|
| %a | abbreviated weekday name (Mon-Sun) | Mon |
| %b | abbreviated month name (Jan-Dec) | Jan |
-| %c | month as an integer number (01-12) | 01 |
+| %c | month as an integer number (01-12), see 'Note 3' below | 01 |
| %C | year divided by 100 and truncated to integer (00-99) | 20 |
| %d | day of the month, zero-padded (01-31) | 02 |
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/18 |
@@ -2553,8 +2554,8 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %i | minute (00-59) | 33 |
| %I | hour in 12h format (01-12) | 10 |
| %j | day of the year (001-366) | 002 |
-| %k | hour in 24h format (00-23) | 22 |
-| %l | hour in 12h format (01-12) | 09 |
+| %k | hour in 24h format (00-23), see 'Note 3' below | 22 |
+| %l | hour in 12h format (01-12), see 'Note 3' below | 09 |
| %m | month as an integer number (01-12) | 01 |
| %M | full month name (January-December), see 'Note 2' below | January |
| %n | new-line character (‘’) | |
@@ -2579,6 +2580,8 @@ Note 1: In ClickHouse versions earlier than v23.4, `%f` prints a single zero (0)
Note 2: In ClickHouse versions earlier than v23.4, `%M` prints the minute (00-59) instead of the full month name (January-December). The previous behavior can be restored using setting `formatdatetime_parsedatetime_m_is_month_name = 0`.
+Note 3: In ClickHouse versions earlier than v23.11, function `parseDateTime()` required leading zeros for formatters `%c` (month) and `%l`/`%k` (hour), e.g. `07`. In later versions, the leading zero may be omitted, e.g. `7`. The previous behavior can be restored using setting `parsedatetime_parse_without_leading_zeros = 0`. Note that function `formatDateTime()` by default still prints leading zeros for `%c` and `%l`/`%k` to not break existing use cases. This behavior can be changed by setting `formatdatetime_format_without_leading_zeros = 1`.
+
**Example**
``` sql
diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md
index bde2a8a9505..91c04cfded3 100644
--- a/docs/en/sql-reference/functions/functions-for-nulls.md
+++ b/docs/en/sql-reference/functions/functions-for-nulls.md
@@ -164,7 +164,7 @@ Consider a list of contacts that may specify multiple ways to contact a customer
└──────────┴──────┴───────────┴───────────┘
```
-The `mail` and `phone` fields are of type String, but the `icq` field is `UInt32`, so it needs to be converted to `String`.
+The `mail` and `phone` fields are of type String, but the `telegram` field is `UInt32`, so it needs to be converted to `String`.
Get the first available contact method for the customer from the contact list:
diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md
index 9eab2274210..b27668caf0c 100644
--- a/docs/en/sql-reference/functions/math-functions.md
+++ b/docs/en/sql-reference/functions/math-functions.md
@@ -6,11 +6,9 @@ sidebar_label: Mathematical
# Mathematical Functions
-All the functions return a Float64 number. Results are generally as close to the actual result as possible, but in some cases less precise than the machine-representable number.
-
## e
-Returns e.
+Returns e ([Euler's number](https://en.wikipedia.org/wiki/E_%28mathematical_constant%29)).
**Syntax**
@@ -18,15 +16,22 @@ Returns e.
e()
```
+**Returned value**
+
+Type: [Float64](../../sql-reference/data-types/float.md).
+
## pi
-Returns π.
+Returns π ([Pi](https://en.wikipedia.org/wiki/Pi)).
**Syntax**
```sql
pi()
```
+**Returned value**
+
+Type: [Float64](../../sql-reference/data-types/float.md).
## exp
@@ -38,6 +43,14 @@ Returns e to the power of the given argument.
exp(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
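+**Example** (a minimal illustration)
+
+```sql
+SELECT exp(1); -- returns e ≈ 2.718281828459045
+```
+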
## log
Returns the natural logarithm of the argument.
@@ -50,6 +63,14 @@ log(x)
Alias: `ln(x)`
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## exp2
Returns 2 to the power of the given argument
@@ -60,6 +81,14 @@ Returns 2 to the power of the given argument
exp2(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## intExp2
Like `exp` but returns a UInt64.
@@ -80,6 +109,14 @@ Returns the binary logarithm of the argument.
log2(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## exp10
Returns 10 to the power of the given argument.
@@ -90,6 +127,14 @@ Returns 10 to the power of the given argument.
exp10(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## intExp10
Like `exp10` but returns a UInt64.
@@ -110,6 +155,14 @@ Returns the decimal logarithm of the argument.
log10(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## sqrt
Returns the square root of the argument.
@@ -118,6 +171,14 @@ Returns the square root of the argument.
sqrt(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## cbrt
Returns the cubic root of the argument.
@@ -126,6 +187,14 @@ Returns the cubic root of the argument.
cbrt(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## erf
If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random variable having a normal distribution with standard deviation `σ` takes the value that is separated from the expected value by more than `x`.
@@ -136,6 +205,14 @@ If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random
erf(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
**Example**
(three sigma rule)
@@ -160,6 +237,14 @@ Returns a number close to `1 - erf(x)` without loss of precision for large ‘x
erfc(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## lgamma
Returns the logarithm of the gamma function.
@@ -170,6 +255,14 @@ Returns the logarithm of the gamma function.
lgamma(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## tgamma
Returns the gamma function.
@@ -180,6 +273,14 @@ Returns the gamma function.
gamma(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## sin
Returns the sine of the argument
@@ -190,6 +291,14 @@ Returns the sine of the argument
sin(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## cos
Returns the cosine of the argument.
@@ -200,6 +309,14 @@ Returns the cosine of the argument.
cos(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## tan
Returns the tangent of the argument.
@@ -210,6 +327,14 @@ Returns the tangent of the argument.
tan(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## asin
Returns the arc sine of the argument.
@@ -220,6 +345,14 @@ Returns the arc sine of the argument.
asin(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## acos
Returns the arc cosine of the argument.
@@ -230,6 +363,14 @@ Returns the arc cosine of the argument.
acos(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## atan
Returns the arc tangent of the argument.
@@ -240,6 +381,14 @@ Returns the arc tangent of the argument.
atan(x)
```
+**Arguments**
+
+- `x` - [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md)
+
+**Returned value**
+
+Type: [Float*](../../sql-reference/data-types/float.md).
+
## pow
Returns `x` to the power of `y`.
@@ -252,6 +401,15 @@ pow(x, y)
Alias: `power(x, y)`
+**Arguments**
+
+- `x` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md)
+- `y` - [(U)Int8/16/32/64](../../sql-reference/data-types/int-uint.md) or [Float*](../../sql-reference/data-types/float.md)
+
+**Returned value**
+
+Type: [Float64](../../sql-reference/data-types/float.md).
+
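+**Example** (a minimal illustration)
+
+```sql
+SELECT pow(2, 10); -- 1024
+```
+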
## cosh
Returns the [hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html) of the argument.
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index 4c103274f43..b2a1d5066bb 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -67,7 +67,45 @@ WHERE macro = 'test';
│ test │ Value │
└───────┴──────────────┘
```
+
+## getClientHTTPHeader
+
+Returns the value of the specified HTTP header. If there is no such header or the request method is not HTTP, an exception is thrown.
+
+**Syntax**
+
+```sql
+getClientHTTPHeader(name);
+```
+
+**Arguments**
+
+- `name` — HTTP header name. [String](../../sql-reference/data-types/string.md#string)
+
+**Returned value**
+
+Value of the specified header.
+Type: [String](../../sql-reference/data-types/string.md#string).
+
+
+When this function is executed via `clickhouse-client`, it always returns an empty string, because the client does not communicate over the HTTP protocol.
+```sql
+SELECT getClientHTTPHeader('test')
+```
+Result:
+
+```text
+┌─getClientHTTPHeader('test')─┐
+│ │
+└─────────────────────────────┘
+```
+Now try it with an HTTP request:
+```shell
+echo "select getClientHTTPHeader('X-Clickhouse-User')" | curl -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' 'http://localhost:8123/' -d @-
+
+# result
+default
+```
+
## FQDN
Returns the fully qualified domain name of the ClickHouse server.
diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md
index 59c92e1327e..1510489ce83 100644
--- a/docs/en/sql-reference/table-functions/azureBlobStorage.md
+++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md
@@ -67,6 +67,12 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam
└─────────┘
```
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
+
**See Also**
- [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md)
diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md
index a871bdaafa9..ad1feb87c60 100644
--- a/docs/en/sql-reference/table-functions/file.md
+++ b/docs/en/sql-reference/table-functions/file.md
@@ -191,12 +191,13 @@ Query the total number of rows from all files `file002` inside any folder in dir
SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32');
```
-## Virtual Columns
+## Virtual Columns {#virtual-columns}
-- `_path` — Path to the file.
-- `_file` — Name of the file.
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
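For reference, a short query combining the data columns with the virtual columns above (it reuses the `big_dir` layout from the earlier example, which is a placeholder path):

```sql
-- show which file each row came from, along with the file's size
SELECT _path, _file, _size, count() AS rows
FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32')
GROUP BY _path, _file, _size;
```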
-## Settings
+## Settings {#settings}
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
diff --git a/docs/en/sql-reference/table-functions/fuzzJSON.md b/docs/en/sql-reference/table-functions/fuzzJSON.md
new file mode 100644
index 00000000000..74ccb0bcb8a
--- /dev/null
+++ b/docs/en/sql-reference/table-functions/fuzzJSON.md
@@ -0,0 +1,86 @@
+---
+slug: /en/sql-reference/table-functions/fuzzJSON
+sidebar_position: 75
+sidebar_label: fuzzJSON
+---
+
+# fuzzJSON
+
+Perturbs a JSON string with random variations.
+
+``` sql
+fuzzJSON({ named_collection [option=value [,..]] | json_str[, random_seed] })
+```
+
+**Arguments**
+
+- `named_collection` - A [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md).
+- `option=value` - Named collection optional parameters and their values.
+ - `json_str` (String) - The source string representing structured data in JSON format.
+ - `random_seed` (UInt64) - Manual random seed for producing stable results.
+  - `reuse_output` (boolean) - Reuse the output of one fuzzing round as the input for the next one.
+ - `max_output_length` (UInt64) - Maximum allowable length of the generated or perturbed JSON string.
+ - `probability` (Float64) - The probability to fuzz a JSON field (a key-value pair). Must be within [0, 1] range.
+ - `max_nesting_level` (UInt64) - The maximum allowed depth of nested structures within the JSON data.
+ - `max_array_size` (UInt64) - The maximum allowed size of a JSON array.
+ - `max_object_size` (UInt64) - The maximum allowed number of fields on a single level of a JSON object.
+ - `max_string_value_length` (UInt64) - The maximum length of a String value.
+ - `min_key_length` (UInt64) - The minimum key length. Should be at least 1.
+  - `max_key_length` (UInt64) - The maximum key length. Must be greater than or equal to `min_key_length`, if specified.
+
+**Returned Value**
+
+A table object with a single column containing perturbed JSON strings.
+
+## Usage Example
+
+``` sql
+CREATE NAMED COLLECTION json_fuzzer AS json_str='{}';
+SELECT * FROM fuzzJSON(json_fuzzer) LIMIT 3;
+```
+
+``` text
+{"52Xz2Zd4vKNcuP2":true}
+{"UPbOhOQAdPKIg91":3405264103600403024}
+{"X0QUWu8yT":[]}
+```
+
+``` sql
+SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"name" : "value"}', random_seed=1234) LIMIT 3;
+```
+
+``` text
+{"key":"value", "mxPG0h1R5":"L-YQLv@9hcZbOIGrAn10%GA"}
+{"BRE3":true}
+{"key":"value", "SWzJdEJZ04nrpSfy":[{"3Q23y":[]}]}
+```
+
+``` sql
+SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"students" : ["Alice", "Bob"]}', reuse_output=true) LIMIT 3;
+```
+
+``` text
+{"students":["Alice", "Bob"], "nwALnRMc4pyKD9Krv":[]}
+{"students":["1rNY5ZNs0wU&82t_P", "Bob"], "wLNRGzwDiMKdw":[{}]}
+{"xeEk":["1rNY5ZNs0wU&82t_P", "Bob"], "wLNRGzwDiMKdw":[{}, {}]}
+```
+
+``` sql
+SELECT * FROM fuzzJSON(json_fuzzer, json_str='{"students" : ["Alice", "Bob"]}', max_output_length=512) LIMIT 3;
+```
+
+``` text
+{"students":["Alice", "Bob"], "BREhhXj5":true}
+{"NyEsSWzJdeJZ04s":["Alice", 5737924650575683711, 5346334167565345826], "BjVO2X9L":true}
+{"NyEsSWzJdeJZ04s":["Alice", 5737924650575683711, 5346334167565345826], "BjVO2X9L":true, "k1SXzbSIz":[{}]}
+```
+
+``` sql
+SELECT * FROM fuzzJSON('{"id":1}', 1234) LIMIT 3;
+```
+
+``` text
+{"id":1, "mxPG0h1R5":"L-YQLv@9hcZbOIGrAn10%GA"}
+{"BRjE":16137826149911306846}
+{"XjKE":15076727133550123563}
+```
diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md
index 678470e9150..31780e30e8e 100644
--- a/docs/en/sql-reference/table-functions/hdfs.md
+++ b/docs/en/sql-reference/table-functions/hdfs.md
@@ -94,8 +94,9 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin
## Virtual Columns
-- `_path` — Path to the file.
-- `_file` — Name of the file.
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
## Storage Settings {#storage-settings}
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md
index c80488df05e..dc11259c626 100644
--- a/docs/en/sql-reference/table-functions/s3.md
+++ b/docs/en/sql-reference/table-functions/s3.md
@@ -228,6 +228,12 @@ FROM s3(
LIMIT 5;
```
+## Virtual Columns {#virtual-columns}
+
+- `_path` — Path to the file. Type: `LowCardinality(String)`.
+- `_file` — Name of the file. Type: `LowCardinality(String)`.
+- `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`.
+
## Storage Settings {#storage-settings}
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md
index 859de86f019..4dc6e435b50 100644
--- a/docs/en/sql-reference/table-functions/url.md
+++ b/docs/en/sql-reference/table-functions/url.md
@@ -50,8 +50,9 @@ Character `|` inside patterns is used to specify failover addresses. They are it
## Virtual Columns
-- `_path` — Path to the `URL`.
-- `_file` — Resource name of the `URL`.
+- `_path` — Path to the `URL`. Type: `LowCardinality(String)`.
+- `_file` — Resource name of the `URL`. Type: `LowCardinality(String)`.
+- `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`.
## Storage Settings {#storage-settings}
diff --git a/docs/ru/engines/table-engines/mergetree-family/mergetree.md b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
index 00eb830c9ef..7195ee38af6 100644
--- a/docs/ru/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/ru/engines/table-engines/mergetree-family/mergetree.md
@@ -337,7 +337,7 @@ SELECT count() FROM table WHERE u64 * i32 == 10 AND u64 * length(s) >= 1234
Поддерживаемые типы данных: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `String`, `FixedString`.
- Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall).
+ Фильтром могут пользоваться функции: [equals](../../../sql-reference/functions/comparison-functions.md#equals), [notEquals](../../../sql-reference/functions/comparison-functions.md#notequals), [in](../../../sql-reference/functions/in-functions.md), [notIn](../../../sql-reference/functions/in-functions.md), [has](../../../sql-reference/functions/array-functions.md#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions.md#hasany), [hasAll](../../../sql-reference/functions/array-functions.md#hasall).
**Примеры**
@@ -354,8 +354,8 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
| Функция (оператор) / Индекс | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
|------------------------------------------------------------------------------------------------------------|-------------|--------|-------------|-------------|---------------|
-| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✔ | ✔ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
@@ -363,10 +363,10 @@ INDEX b (u64 * length(str), i32 + f64 * 100, date, str) TYPE set(100) GRANULARIT
| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md
index 29776ac1249..788693b581e 100644
--- a/docs/ru/operations/server-configuration-parameters/settings.md
+++ b/docs/ru/operations/server-configuration-parameters/settings.md
@@ -994,7 +994,7 @@ ClickHouse использует потоки из глобального пул
- Положительное целое число.
-Значение по умолчанию: 128.
+Значение по умолчанию: 512.
## background_fetches_pool_size {#background_fetches_pool_size}
@@ -1004,7 +1004,7 @@ ClickHouse использует потоки из глобального пул
- Положительное целое число.
-Значение по умолчанию: 8.
+Значение по умолчанию: 16.
## background_distributed_schedule_pool_size {#background_distributed_schedule_pool_size}
diff --git a/docs/ru/sql-reference/functions/comparison-functions.md b/docs/ru/sql-reference/functions/comparison-functions.md
index f66b42977cc..bb9322d5a82 100644
--- a/docs/ru/sql-reference/functions/comparison-functions.md
+++ b/docs/ru/sql-reference/functions/comparison-functions.md
@@ -23,14 +23,14 @@ sidebar_label: "Функции сравнения"
Замечание. До версии 1.1.54134 сравнение знаковых и беззнаковых целых чисел производилось также, как в C++. То есть, вы могли получить неверный результат в таких случаях: SELECT 9223372036854775807 \> -1. С версии 1.1.54134 поведение изменилось и стало математически корректным.
-## equals, оператор a = b и a == b {#function-equals}
+## equals, оператор a = b и a == b {#equals}
-## notEquals, оператор a != b и a `<>` b {#function-notequals}
+## notEquals, оператор a != b и a `<>` b {#notequals}
-## less, оператор `<` {#function-less}
+## less, оператор `<` {#less}
-## greater, оператор `>` {#function-greater}
+## greater, оператор `>` {#greater}
-## lessOrEquals, оператор `<=` {#function-lessorequals}
+## lessOrEquals, оператор `<=` {#lessorequals}
-## greaterOrEquals, оператор `>=` {#function-greaterorequals}
+## greaterOrEquals, оператор `>=` {#greaterorequals}
diff --git a/docs/zh/engines/table-engines/mergetree-family/mergetree.md b/docs/zh/engines/table-engines/mergetree-family/mergetree.md
index 815d84702a9..c738ae0f24c 100644
--- a/docs/zh/engines/table-engines/mergetree-family/mergetree.md
+++ b/docs/zh/engines/table-engines/mergetree-family/mergetree.md
@@ -349,8 +349,8 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达
| 函数 (操作符) / 索引 | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter |
| ------------------------------------------------------------ | ----------- | ------ | ---------- | ---------- | ------------ |
-| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#function-equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#function-notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [equals (=, ==)](../../../sql-reference/functions/comparison-functions.md#equals) | ✔ | ✔ | ✔ | ✔ | ✔ |
+| [notEquals(!=, <>)](../../../sql-reference/functions/comparison-functions.md#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](../../../sql-reference/functions/string-search-functions.md#function-like) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notLike](../../../sql-reference/functions/string-search-functions.md#function-notlike) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [startsWith](../../../sql-reference/functions/string-functions.md#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ |
@@ -358,10 +358,10 @@ WHERE 子句中的条件可以包含对某列数据进行运算的函数表达
| [multiSearchAny](../../../sql-reference/functions/string-search-functions.md#function-multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ |
| [in](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](../../../sql-reference/functions/in-functions.md#in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ |
-| [less (\<)](../../../sql-reference/functions/comparison-functions.md#function-less) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#function-greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#function-lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
-| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#function-greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [less (\<)](../../../sql-reference/functions/comparison-functions.md#less) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greater (\>)](../../../sql-reference/functions/comparison-functions.md#greater) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [lessOrEquals (\<=)](../../../sql-reference/functions/comparison-functions.md#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
+| [greaterOrEquals (\>=)](../../../sql-reference/functions/comparison-functions.md#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [empty](../../../sql-reference/functions/array-functions.md#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](../../../sql-reference/functions/array-functions.md#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
diff --git a/docs/zh/sql-reference/functions/comparison-functions.md b/docs/zh/sql-reference/functions/comparison-functions.md
index ef3adf427f1..ed33dc40531 100644
--- a/docs/zh/sql-reference/functions/comparison-functions.md
+++ b/docs/zh/sql-reference/functions/comparison-functions.md
@@ -21,14 +21,14 @@ sidebar_label: 比较函数
字符串按字节进行比较。较短的字符串小于以其开头并且至少包含一个字符的所有字符串。
-## 等于,a=b和a==b 运算符 {#equals-a-b-and-a-b-operator}
+## 等于,a=b和a==b 运算符 {#equals}
-## 不等于,a!=b和a<>b 运算符 {#notequals-a-operator-b-and-a-b}
+## 不等于,a!=b和a<>b 运算符 {#notequals}
-## 少, < 运算符 {#less-operator}
+## 少, < 运算符 {#less}
-## 大于, > 运算符 {#greater-operator}
+## 大于, > 运算符 {#greater}
-## 小于等于, <= 运算符 {#lessorequals-operator}
+## 小于等于, <= 运算符 {#lessorequals}
-## 大于等于, >= 运算符 {#greaterorequals-operator}
+## 大于等于, >= 运算符 {#greaterorequals}
diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml
index 95532726d94..238126f95fd 100644
--- a/packages/clickhouse-common-static.yaml
+++ b/packages/clickhouse-common-static.yaml
@@ -44,6 +44,8 @@ contents:
dst: /usr/bin/clickhouse-odbc-bridge
- src: root/usr/share/bash-completion/completions
dst: /usr/share/bash-completion/completions
+- src: root/usr/share/clickhouse
+ dst: /usr/share/clickhouse
# docs
- src: ../AUTHORS
dst: /usr/share/doc/clickhouse-common-static/AUTHORS
diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt
index f17aff65fb5..b3a5af6d6c9 100644
--- a/programs/CMakeLists.txt
+++ b/programs/CMakeLists.txt
@@ -457,3 +457,10 @@ endif()
if (ENABLE_FUZZING)
add_compile_definitions(FUZZING_MODE=1)
endif ()
+
+if (TARGET ch_contrib::protobuf)
+ get_property(google_proto_files TARGET ch_contrib::protobuf PROPERTY google_proto_files)
+ foreach (proto_file IN LISTS google_proto_files)
+ install(FILES ${proto_file} DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/clickhouse/protos/google/protobuf)
+ endforeach()
+endif ()
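For context, these installed protos let a user's format schema import the well-known types; a hedged sketch follows (the schema file and table names are hypothetical):

```sql
-- Suppose format_schema_path contains event.proto (hypothetical), which begins with:
--   import "google/protobuf/timestamp.proto";
-- The import is resolved against the files installed under google_protos_path.
SELECT * FROM events FORMAT Protobuf SETTINGS format_schema = 'event:Event';
```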
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index d2527ad0c98..3233e40de31 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -306,6 +306,10 @@ void Client::initialize(Poco::Util::Application & self)
/// Set path for format schema files
if (config().has("format_schema_path"))
global_context->setFormatSchemaPath(fs::weakly_canonical(config().getString("format_schema_path")));
+
+ /// Set the path for google proto files
+ if (config().has("google_protos_path"))
+ global_context->setGoogleProtosPath(fs::weakly_canonical(config().getString("google_protos_path")));
}
diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml
index dbfb267d778..d0deb818c1e 100644
--- a/programs/client/clickhouse-client.xml
+++ b/programs/client/clickhouse-client.xml
@@ -37,7 +37,7 @@
{display_name} \e[1;31m:)\e[0m
-
+
+    <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>
setHTTPHeaderFilter(*config);
global_context->setMaxTableSizeToDrop(server_settings_.max_table_size_to_drop);
+ global_context->setClientHTTPHeaderForbiddenHeaders(server_settings_.get_client_http_header_forbidden_headers);
+ global_context->setAllowGetHTTPHeaderFunction(server_settings_.allow_get_client_http_header);
global_context->setMaxPartitionSizeToDrop(server_settings_.max_partition_size_to_drop);
ConcurrencyControl::SlotCount concurrent_threads_soft_limit = ConcurrencyControl::Unlimited;
@@ -1575,6 +1577,10 @@ try
global_context->setFormatSchemaPath(format_schema_path);
fs::create_directories(format_schema_path);
+ /// Set the path for google proto files
+ if (config().has("google_protos_path"))
+ global_context->setGoogleProtosPath(fs::weakly_canonical(config().getString("google_protos_path")));
+
/// Set path for filesystem caches
fs::path filesystem_caches_path(config().getString("filesystem_caches_path", ""));
if (!filesystem_caches_path.empty())
diff --git a/programs/server/config.d/path.xml b/programs/server/config.d/path.xml
index 46af5bfb64b..7afada689d7 100644
--- a/programs/server/config.d/path.xml
+++ b/programs/server/config.d/path.xml
@@ -3,6 +3,7 @@
     <tmp_path>./tmp/</tmp_path>
     <user_files_path>./user_files/</user_files_path>
     <format_schema_path>./format_schemas/</format_schema_path>
+    <google_protos_path>../../contrib/google-protobuf/src/</google_protos_path>
     <access_control_path>./access/</access_control_path>
     <top_level_domains_path>./top_level_domains/</top_level_domains_path>
diff --git a/programs/server/config.xml b/programs/server/config.xml
index f81fbe9cc3b..f367b97cec1 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -1428,6 +1428,10 @@
-->
     <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
+
+    <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>
+
10
+
+
+    <google_protos_path>/usr/share/clickhouse/protos/</google_protos_path>
+
diff --git a/tests/config/config.d/forbidden_get_client_http_headers.xml b/tests/config/config.d/forbidden_get_client_http_headers.xml
new file mode 100644
index 00000000000..cfecb015260
--- /dev/null
+++ b/tests/config/config.d/forbidden_get_client_http_headers.xml
@@ -0,0 +1,4 @@
+<clickhouse>
+    <get_client_http_header_forbidden_headers>FORBIDDEN-KEY1,FORBIDDEN-KEY2</get_client_http_header_forbidden_headers>
+    <allow_get_client_http_header>1</allow_get_client_http_header>
+</clickhouse>
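Assuming the reconstruction of this config is right, a sketch of the behavior the test setup enables: headers on the forbidden list should be rejected.

```sql
-- Executed over HTTP with the test config above, this is expected to throw,
-- since FORBIDDEN-KEY1 is listed in get_client_http_header_forbidden_headers.
SELECT getClientHTTPHeader('FORBIDDEN-KEY1');
```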
diff --git a/tests/config/install.sh b/tests/config/install.sh
index 417a413bbec..96f35219bc6 100755
--- a/tests/config/install.sh
+++ b/tests/config/install.sh
@@ -15,6 +15,7 @@ mkdir -p $DEST_SERVER_PATH/config.d/
mkdir -p $DEST_SERVER_PATH/users.d/
mkdir -p $DEST_CLIENT_PATH
+ln -sf $SRC_PATH/config.d/forbidden_get_client_http_headers.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/zookeeper_write.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/listen.xml $DEST_SERVER_PATH/config.d/
ln -sf $SRC_PATH/config.d/text_log.xml $DEST_SERVER_PATH/config.d/
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index cbc511628f0..5e4bb32cf94 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -4130,14 +4130,14 @@ class ClickHouseInstance:
[
"bash",
"-c",
- "echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/system.sql",
+ "if [ ! -f /var/lib/clickhouse/metadata/system.sql ]; then echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/system.sql; fi",
]
)
self.exec_in_container(
[
"bash",
"-c",
- "echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/default.sql",
+ "if [ ! -f /var/lib/clickhouse/metadata/default.sql ]; then echo 'ATTACH DATABASE system ENGINE=Ordinary' > /var/lib/clickhouse/metadata/default.sql; fi",
]
)
self.exec_in_container(
diff --git a/tests/integration/helpers/corrupt_part_data_on_disk.py b/tests/integration/helpers/corrupt_part_data_on_disk.py
index e253ce23d83..a84a6e825e6 100644
--- a/tests/integration/helpers/corrupt_part_data_on_disk.py
+++ b/tests/integration/helpers/corrupt_part_data_on_disk.py
@@ -1,19 +1,21 @@
-def corrupt_part_data_on_disk(node, table, part_name):
+def corrupt_part_data_on_disk(node, table, part_name, file_ext=".bin", database=None):
part_path = node.query(
- "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(
- table, part_name
+ "SELECT path FROM system.parts WHERE table = '{}' and name = '{}' {}".format(
+ table,
+ part_name,
+ f"AND database = '{database}'" if database is not None else "",
)
).strip()
- corrupt_part_data_by_path(node, part_path)
+ corrupt_part_data_by_path(node, part_path, file_ext)
-def corrupt_part_data_by_path(node, part_path):
+def corrupt_part_data_by_path(node, part_path, file_ext=".bin"):
print("Corrupting part", part_path, "at", node.name)
print(
"Will corrupt: ",
node.exec_in_container(
- ["bash", "-c", "cd {p} && ls *.bin | head -n 1".format(p=part_path)]
+ ["bash", "-c", f"cd {part_path} && ls *{file_ext} | head -n 1"]
),
)
@@ -21,9 +23,7 @@ def corrupt_part_data_by_path(node, part_path):
[
"bash",
"-c",
- "cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format(
- p=part_path
- ),
+ f"cd {part_path} && ls *{file_ext} | head -n 1 | xargs -I{{}} sh -c 'truncate -s -1 $1' -- {{}}",
],
privileged=True,
)
diff --git a/tests/integration/test_check_table/test.py b/tests/integration/test_check_table/test.py
index 70cadbc97e2..021977fb6b6 100644
--- a/tests/integration/test_check_table/test.py
+++ b/tests/integration/test_check_table/test.py
@@ -3,6 +3,7 @@ import pytest
import concurrent
from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
+from helpers.corrupt_part_data_on_disk import corrupt_part_data_on_disk
cluster = ClickHouseCluster(__file__)
@@ -21,22 +22,6 @@ def started_cluster():
cluster.shutdown()
-def corrupt_data_part_on_disk(node, database, table, part_name):
- part_path = node.query(
- f"SELECT path FROM system.parts WHERE database = '{database}' AND table = '{table}' AND name = '{part_name}'"
- ).strip()
- node.exec_in_container(
- [
- "bash",
- "-c",
- "cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format(
- p=part_path
- ),
- ],
- privileged=True,
- )
-
-
def remove_checksums_on_disk(node, database, table, part_name):
part_path = node.query(
f"SELECT path FROM system.parts WHERE database = '{database}' AND table = '{table}' AND name = '{part_name}'"
@@ -59,14 +44,15 @@ def remove_part_from_disk(node, table, part_name):
)
-def test_check_normal_table_corruption(started_cluster):
+@pytest.mark.parametrize("merge_tree_settings", [""])
+def test_check_normal_table_corruption(started_cluster, merge_tree_settings):
node1.query("DROP TABLE IF EXISTS non_replicated_mt")
node1.query(
- """
+ f"""
CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32)
ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id
- SETTINGS min_bytes_for_wide_part=0;
+ {merge_tree_settings};
"""
)
@@ -105,7 +91,9 @@ def test_check_normal_table_corruption(started_cluster):
assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n"
- corrupt_data_part_on_disk(node1, "default", "non_replicated_mt", "201902_1_1_0")
+ corrupt_part_data_on_disk(
+ node1, "non_replicated_mt", "201902_1_1_0", database="default"
+ )
assert node1.query(
"CHECK TABLE non_replicated_mt",
@@ -129,7 +117,9 @@ def test_check_normal_table_corruption(started_cluster):
== "201901_2_2_0\t1\t\n"
)
- corrupt_data_part_on_disk(node1, "default", "non_replicated_mt", "201901_2_2_0")
+ corrupt_part_data_on_disk(
+ node1, "non_replicated_mt", "201901_2_2_0", database="default"
+ )
remove_checksums_on_disk(node1, "default", "non_replicated_mt", "201901_2_2_0")
@@ -139,16 +129,23 @@ def test_check_normal_table_corruption(started_cluster):
).strip().split("\t")[0:2] == ["201901_2_2_0", "0"]
-def test_check_replicated_table_simple(started_cluster):
+@pytest.mark.parametrize("merge_tree_settings, zk_path_suffix", [("", "_0")])
+def test_check_replicated_table_simple(
+ started_cluster, merge_tree_settings, zk_path_suffix
+):
for node in [node1, node2]:
- node.query("DROP TABLE IF EXISTS replicated_mt")
+ node.query("DROP TABLE IF EXISTS replicated_mt SYNC")
node.query(
"""
CREATE TABLE replicated_mt(date Date, id UInt32, value Int32)
- ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id;
+ ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_{zk_path_suffix}', '{replica}')
+ PARTITION BY toYYYYMM(date) ORDER BY id
+ {merge_tree_settings}
""".format(
- replica=node.name
+ replica=node.name,
+ zk_path_suffix=zk_path_suffix,
+ merge_tree_settings=merge_tree_settings,
)
)
@@ -220,16 +217,32 @@ def test_check_replicated_table_simple(started_cluster):
)
-def test_check_replicated_table_corruption(started_cluster):
+@pytest.mark.parametrize(
+ "merge_tree_settings, zk_path_suffix, part_file_ext",
+ [
+ (
+ "",
+ "_0",
+ ".bin",
+ )
+ ],
+)
+def test_check_replicated_table_corruption(
+ started_cluster, merge_tree_settings, zk_path_suffix, part_file_ext
+):
for node in [node1, node2]:
- node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1")
+ node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1 SYNC")
node.query_with_retry(
"""
CREATE TABLE replicated_mt_1(date Date, id UInt32, value Int32)
- ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id;
+ ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1_{zk_path_suffix}', '{replica}')
+ PARTITION BY toYYYYMM(date) ORDER BY id
+ {merge_tree_settings}
""".format(
- replica=node.name
+ replica=node.name,
+ merge_tree_settings=merge_tree_settings,
+ zk_path_suffix=zk_path_suffix,
)
)
@@ -248,7 +261,10 @@ def test_check_replicated_table_corruption(started_cluster):
"SELECT name from system.parts where table = 'replicated_mt_1' and partition_id = '201901' and active = 1"
).strip()
- corrupt_data_part_on_disk(node1, "default", "replicated_mt_1", part_name)
+ corrupt_part_data_on_disk(
+ node1, "replicated_mt_1", part_name, part_file_ext, database="default"
+ )
+
assert node1.query(
"CHECK TABLE replicated_mt_1 PARTITION 201901",
settings={"check_query_single_value_result": 0, "max_threads": 1},
diff --git a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml
index 91a22a81a22..e1e2444992a 100644
--- a/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml
+++ b/tests/integration/test_distributed_storage_configuration/configs/config.d/overrides.xml
@@ -27,14 +27,14 @@
-                <default>
+                <jbod_policy>
                     <disk>disk1</disk>
                     <disk>disk2</disk>
-                </default>
+                </jbod_policy>
diff --git a/tests/integration/test_distributed_storage_configuration/test.py b/tests/integration/test_distributed_storage_configuration/test.py
index b0e17da37b2..00620668bd9 100644
--- a/tests/integration/test_distributed_storage_configuration/test.py
+++ b/tests/integration/test_distributed_storage_configuration/test.py
@@ -53,7 +53,7 @@ def test_insert(start_cluster):
test,
foo,
key%2,
- 'default'
+ 'jbod_policy'
)
"""
)
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py
index c0f1f7a4a32..9216b08f942 100644
--- a/tests/integration/test_merge_tree_s3/test.py
+++ b/tests/integration/test_merge_tree_s3/test.py
@@ -928,6 +928,10 @@ def test_merge_canceled_by_s3_errors_when_move(cluster, broken_s3, node_name):
def test_s3_engine_heavy_write_check_mem(
cluster, broken_s3, node_name, in_flight_memory
):
+ pytest.skip(
+ "Disabled, will be fixed after https://github.com/ClickHouse/ClickHouse/issues/51152"
+ )
+
in_flight = in_flight_memory[0]
memory = in_flight_memory[1]
@@ -947,12 +951,18 @@ def test_s3_engine_heavy_write_check_mem(
)
broken_s3.setup_fake_multpartuploads()
- broken_s3.setup_slow_answers(10 * 1024 * 1024, timeout=15, count=10)
+    slow_responses = 10
+ slow_timeout = 15
+ broken_s3.setup_slow_answers(
+        10 * 1024 * 1024, timeout=slow_timeout, count=slow_responses
+ )
query_id = f"INSERT_INTO_S3_ENGINE_QUERY_ID_{in_flight}"
node.query(
"INSERT INTO s3_test SELECT number, toString(number) FROM numbers(50000000)"
- f" SETTINGS max_memory_usage={2*memory}"
+ f" SETTINGS "
+ f" max_memory_usage={2*memory}"
+ f", max_threads=1" # ParallelFormattingOutputFormat consumption depends on it
f", s3_max_inflight_parts_for_one_file={in_flight}",
query_id=query_id,
)
@@ -969,7 +979,8 @@ def test_s3_engine_heavy_write_check_mem(
assert int(memory_usage) < 1.2 * memory
assert int(memory_usage) > 0.8 * memory
- assert int(wait_inflight) > in_flight * 1000 * 1000
+    # The larger in_flight is, the less time CH spends waiting.
+    assert int(wait_inflight) / 1000 / 1000 > slow_responses * slow_timeout / in_flight
check_no_objects_after_drop(cluster, node_name=node_name)
diff --git a/tests/integration/test_replicated_database/test.py b/tests/integration/test_replicated_database/test.py
index f45841124d9..a591c93d264 100644
--- a/tests/integration/test_replicated_database/test.py
+++ b/tests/integration/test_replicated_database/test.py
@@ -1351,3 +1351,48 @@ def test_replicated_table_structure_alter(started_cluster):
assert "1\t2\t3\t0\n1\t2\t3\t4\n" == dummy_node.query(
"SELECT * FROM table_structure.rmt ORDER BY k"
)
+
+
+def test_modify_comment(started_cluster):
+ main_node.query(
+ "CREATE DATABASE modify_comment_db ENGINE = Replicated('/test/modify_comment', 'shard1', 'replica' || '1');"
+ )
+
+ dummy_node.query(
+ "CREATE DATABASE modify_comment_db ENGINE = Replicated('/test/modify_comment', 'shard1', 'replica' || '2');"
+ )
+
+ main_node.query(
+ "CREATE TABLE modify_comment_db.modify_comment_table (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree ORDER BY k PARTITION BY toYYYYMM(d);"
+ )
+
+ def restart_verify_not_readonly():
+ main_node.restart_clickhouse()
+ assert (
+ main_node.query(
+ "SELECT is_readonly FROM system.replicas WHERE table = 'modify_comment_table'"
+ )
+ == "0\n"
+ )
+ dummy_node.restart_clickhouse()
+ assert (
+ dummy_node.query(
+ "SELECT is_readonly FROM system.replicas WHERE table = 'modify_comment_table'"
+ )
+ == "0\n"
+ )
+
+ main_node.query(
+ "ALTER TABLE modify_comment_db.modify_comment_table COMMENT COLUMN d 'Some comment'"
+ )
+
+ restart_verify_not_readonly()
+
+ main_node.query(
+ "ALTER TABLE modify_comment_db.modify_comment_table MODIFY COMMENT 'Some error comment'"
+ )
+
+ restart_verify_not_readonly()
+
+ main_node.query("DROP DATABASE modify_comment_db SYNC")
+ dummy_node.query("DROP DATABASE modify_comment_db SYNC")
diff --git a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py
index d2dbc7c5466..c8b63f9502a 100644
--- a/tests/integration/test_replicated_merge_tree_encryption_codec/test.py
+++ b/tests/integration/test_replicated_merge_tree_encryption_codec/test.py
@@ -91,9 +91,14 @@ def test_different_keys():
copy_keys(node2, "key_b")
create_table()
- insert_data()
+    # Insert two distinct blocks so that each replica has to fetch a part from the other replica.
+ node1.query("INSERT INTO tbl VALUES (1, 'str1')")
+ node2.query("INSERT INTO tbl VALUES (2, 'str2')")
node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
+ # After "SYSTEM SYNC REPLICA" we expect node1 and node2 here both having a part for (1, 'str1') encrypted with "key_a",
+ # and a part for (2, 'str2') encrypted with "key_b".
+ # So the command "SELECT * from tbl" must fail on both nodes because each node has only one encryption key.
assert "BAD_DECRYPT" in node1.query_and_get_error("SELECT * FROM tbl")
assert "BAD_DECRYPT" in node2.query_and_get_error("SELECT * FROM tbl")
diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py
index e0365f70e7f..96fff6b891f 100644
--- a/tests/integration/test_storage_azure_blob_storage/test.py
+++ b/tests/integration/test_storage_azure_blob_storage/test.py
@@ -1156,3 +1156,37 @@ def test_filtering_by_file_or_path(cluster):
)
assert int(result) == 1
+
+
+def test_size_virtual_column(cluster):
+ node = cluster.instances["node"]
+ storage_account_url = cluster.env_variables["AZURITE_STORAGE_ACCOUNT_URL"]
+ azure_query(
+ node,
+ f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column1.tsv', 'devstoreaccount1', "
+ f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1",
+ )
+
+ azure_query(
+ node,
+ f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column2.tsv', 'devstoreaccount1', "
+ f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11",
+ )
+
+ azure_query(
+ node,
+ f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column3.tsv', 'devstoreaccount1', "
+ f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111",
+ )
+
+ result = azure_query(
+ node,
+ f"select _file, _size from azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column*.tsv', 'devstoreaccount1', "
+ f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') "
+ f"order by _file",
+ )
+
+ assert (
+ result
+ == "test_size_virtual_column1.tsv\t2\ntest_size_virtual_column2.tsv\t3\ntest_size_virtual_column3.tsv\t4\n"
+ )
diff --git a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql
index 965ce45fb90..422f4a010f1 100644
--- a/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql
+++ b/tests/queries/0_stateless/00184_shard_distributed_group_by_no_merge.sql
@@ -8,8 +8,6 @@ SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) LIMIT 1 SET
SELECT 'distributed_group_by_no_merge=2';
SET max_distributed_connections=1;
SET max_threads=1;
--- breaks any(_shard_num)
-SET optimize_move_functions_out_of_any=0;
SELECT 'LIMIT';
SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1 SETTINGS distributed_group_by_no_merge=2;
diff --git a/tests/queries/0_stateless/00718_format_datetime.reference b/tests/queries/0_stateless/00718_format_datetime.reference
index 50874ac9b2e..f22c953e739 100644
--- a/tests/queries/0_stateless/00718_format_datetime.reference
+++ b/tests/queries/0_stateless/00718_format_datetime.reference
@@ -64,3 +64,13 @@ no formatting pattern no formatting pattern
2022-12-08 18:11:29.000000
2022-12-08 00:00:00.000000
2022-12-08 00:00:00.000000
+01
+01
+02
+02
+02
+1
+01
+2
+2
+02
diff --git a/tests/queries/0_stateless/00718_format_datetime.sql b/tests/queries/0_stateless/00718_format_datetime.sql
index c0db6a4f64e..4f2ce70965b 100644
--- a/tests/queries/0_stateless/00718_format_datetime.sql
+++ b/tests/queries/0_stateless/00718_format_datetime.sql
@@ -90,3 +90,15 @@ select formatDateTime(toDateTime64('2022-12-08 18:11:29.1234', 0, 'UTC'), '%F %T
select formatDateTime(toDateTime('2022-12-08 18:11:29', 'UTC'), '%F %T.%f');
select formatDateTime(toDate32('2022-12-08 18:11:29', 'UTC'), '%F %T.%f');
select formatDateTime(toDate('2022-12-08 18:11:29', 'UTC'), '%F %T.%f');
+
+-- %c %k %l with different formatdatetime_format_without_leading_zeros
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 0;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 0;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 0;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 0;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 0;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 1;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 1;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 1;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 1;
+select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 1;
diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
index f61a60a0bda..8ebe1807a1b 100755
--- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
+++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
@@ -69,7 +69,7 @@ function alter_table()
if [ -z "$table" ]; then continue; fi
$CLICKHOUSE_CLIENT --distributed_ddl_task_timeout=0 -q \
"alter table $table update n = n + (select max(n) from merge(REGEXP('${CLICKHOUSE_DATABASE}.*'), '.*')) where 1 settings allow_nondeterministic_mutations=1" \
- 2>&1| grep -Fa "Exception: " | grep -Fv "Cannot enqueue query" | grep -Fv "ZooKeeper session expired" | grep -Fv UNKNOWN_DATABASE | grep -Fv UNKNOWN_TABLE | grep -Fv TABLE_IS_READ_ONLY | grep -Fv TABLE_IS_DROPPED | grep -Fv "Error while executing table function merge"
+ 2>&1| grep -Fa "Exception: " | grep -Fv "Cannot enqueue query" | grep -Fv "ZooKeeper session expired" | grep -Fv UNKNOWN_DATABASE | grep -Fv UNKNOWN_TABLE | grep -Fv TABLE_IS_READ_ONLY | grep -Fv TABLE_IS_DROPPED | grep -Fv ABORTED | grep -Fv "Error while executing table function merge"
sleep 0.$RANDOM
done
}
diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.reference b/tests/queries/0_stateless/01280_ttl_where_group_by.reference
deleted file mode 100644
index 65e7e5b158f..00000000000
--- a/tests/queries/0_stateless/01280_ttl_where_group_by.reference
+++ /dev/null
@@ -1,26 +0,0 @@
-ttl_01280_1
-1 1 0 4
-1 2 3 7
-1 3 0 5
-2 1 0 1
-2 1 20 1
-ttl_01280_2
-1 1 [0,2,3] 4
-1 1 [5,4,1] 13
-1 3 [1,0,1,0] 17
-2 1 [3,1,0,3] 8
-3 1 [2,4,5] 8
-ttl_01280_3
-1 1 0 4
-1 1 10 6
-2 1 0 3
-3 1 8 2
-ttl_01280_4
-0 4
-13 9
-ttl_01280_5
-1 2 7 5
-2 3 6 5
-ttl_01280_6
-1 3 5
-2 3 5
diff --git a/tests/queries/0_stateless/01280_ttl_where_group_by.sh b/tests/queries/0_stateless/01280_ttl_where_group_by.sh
deleted file mode 100755
index e6f83d6edd1..00000000000
--- a/tests/queries/0_stateless/01280_ttl_where_group_by.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env bash
-# Tags: no-parallel, no-fasttest
-
-CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
-# shellcheck source=../shell_config.sh
-. "$CURDIR"/../shell_config.sh
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_1"
-
-function optimize()
-{
- for _ in {0..20}; do
- $CLICKHOUSE_CLIENT --query "OPTIMIZE TABLE $1 FINAL SETTINGS optimize_throw_if_noop=1" 2>/dev/null && break
- sleep 0.3
- done
-}
-
-# "SETTINGS max_parts_to_merge_at_once = 1" prevents merges to start before our own OPTIMIZE FINAL
-
-echo "ttl_01280_1"
-$CLICKHOUSE_CLIENT -n --query "
-create table ttl_01280_1 (a Int, b Int, x Int, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second delete where x % 10 == 0 and y > 5 SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_1 values (1, 1, 0, 4, now() + 10);
-insert into ttl_01280_1 values (1, 1, 10, 6, now());
-insert into ttl_01280_1 values (1, 2, 3, 7, now());
-insert into ttl_01280_1 values (1, 3, 0, 5, now());
-insert into ttl_01280_1 values (2, 1, 20, 1, now());
-insert into ttl_01280_1 values (2, 1, 0, 1, now());
-insert into ttl_01280_1 values (3, 1, 0, 8, now());"
-
-sleep 2
-optimize "ttl_01280_1"
-$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_1 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_2"
-
-echo "ttl_01280_2"
-$CLICKHOUSE_CLIENT -n --query "
-create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d) SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_2 values (1, 1, array(0, 2, 3), 4, now() + 10);
-insert into ttl_01280_2 values (1, 1, array(5, 4, 3), 6, now());
-insert into ttl_01280_2 values (1, 1, array(5, 5, 1), 7, now());
-insert into ttl_01280_2 values (1, 3, array(3, 0, 4), 5, now());
-insert into ttl_01280_2 values (1, 3, array(1, 1, 2, 1), 9, now());
-insert into ttl_01280_2 values (1, 3, array(3, 2, 1, 0), 3, now());
-insert into ttl_01280_2 values (2, 1, array(3, 3, 3), 7, now());
-insert into ttl_01280_2 values (2, 1, array(11, 1, 0, 3), 1, now());
-insert into ttl_01280_2 values (3, 1, array(2, 4, 5), 8, now());"
-
-sleep 2
-optimize "ttl_01280_2"
-$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_2 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_3"
-
-echo "ttl_01280_3"
-$CLICKHOUSE_CLIENT -n --query "
-create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), x = argMax(x, d), y = argMax(y, d), d = max(d) SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_3 values (1, 1, 0, 4, now() + 10);
-insert into ttl_01280_3 values (1, 1, 10, 6, now() + 1);
-insert into ttl_01280_3 values (1, 2, 3, 7, now());
-insert into ttl_01280_3 values (1, 3, 0, 5, now());
-insert into ttl_01280_3 values (2, 1, 20, 1, now());
-insert into ttl_01280_3 values (2, 1, 0, 3, now() + 1);
-insert into ttl_01280_3 values (3, 1, 0, 3, now());
-insert into ttl_01280_3 values (3, 2, 8, 2, now() + 1);
-insert into ttl_01280_3 values (3, 5, 5, 8, now());"
-
-sleep 2
-optimize "ttl_01280_3"
-$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_3 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_4"
-
-echo "ttl_01280_4"
-$CLICKHOUSE_CLIENT -n --query "
-create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y) SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_4 values (1, 1, 0, 4, now() + 10);
-insert into ttl_01280_4 values (10, 2, 3, 3, now());
-insert into ttl_01280_4 values (2, 10, 1, 7, now());
-insert into ttl_01280_4 values (3, 3, 5, 2, now());
-insert into ttl_01280_4 values (1, 5, 4, 9, now())"
-
-sleep 2
-optimize "ttl_01280_4"
-$CLICKHOUSE_CLIENT --query "select x, y from ttl_01280_4 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5"
-
-echo "ttl_01280_5"
-$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x), b = argMax(b, -b) SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_5 values (1, 2, 3, 5, now());
-insert into ttl_01280_5 values (2, 10, 1, 5, now());
-insert into ttl_01280_5 values (2, 3, 5, 5, now());
-insert into ttl_01280_5 values (1, 5, 4, 5, now());"
-
-sleep 2
-optimize "ttl_01280_5"
-$CLICKHOUSE_CLIENT --query "select a, b, x, y from ttl_01280_5 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_6"
-
-echo "ttl_01280_6"
-$CLICKHOUSE_CLIENT -n --query "
-create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a SETTINGS max_parts_to_merge_at_once = 1;
-insert into ttl_01280_6 values (1, 2, 3, 5, now());
-insert into ttl_01280_6 values (2, 10, 3, 5, now());
-insert into ttl_01280_6 values (2, 3, 3, 5, now());
-insert into ttl_01280_6 values (1, 5, 3, 5, now())"
-
-sleep 2
-optimize "ttl_01280_6"
-$CLICKHOUSE_CLIENT --query "select a, x, y from ttl_01280_6 ORDER BY a, b, x, y"
-
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_1"
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_2"
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_3"
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_4"
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_5"
-$CLICKHOUSE_CLIENT -q "DROP TABLE ttl_01280_6"
diff --git a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
index 2937e856bf5..3f08936e636 100644
--- a/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
+++ b/tests/queries/0_stateless/01321_aggregate_functions_of_group_by_keys.sql
@@ -1,5 +1,4 @@
set optimize_aggregators_of_group_by_keys = 1;
-set optimize_move_functions_out_of_any = 0;
SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b;
diff --git a/tests/queries/0_stateless/01322_any_input_optimize.reference b/tests/queries/0_stateless/01322_any_input_optimize.reference
deleted file mode 100644
index f88f2f5937c..00000000000
--- a/tests/queries/0_stateless/01322_any_input_optimize.reference
+++ /dev/null
@@ -1,32 +0,0 @@
-SELECT any(number) + (any(number) * 2)
-FROM numbers(1, 2)
-3
-SELECT anyLast(number) + (anyLast(number) * 2)
-FROM numbers(1, 2)
-6
-WITH any(number) * 3 AS x
-SELECT x
-FROM numbers(1, 2)
-3
-SELECT
- anyLast(number) * 3 AS x,
- x
-FROM numbers(1, 2)
-6 6
-SELECT any(number + (number * 2))
-FROM numbers(1, 2)
-3
-SELECT anyLast(number + (number * 2))
-FROM numbers(1, 2)
-6
-WITH any(number * 3) AS x
-SELECT x
-FROM numbers(1, 2)
-3
-SELECT
- anyLast(number * 3) AS x,
- x
-FROM numbers(1, 2)
-6 6
-arrayJoin
-0 []
diff --git a/tests/queries/0_stateless/01322_any_input_optimize.sql b/tests/queries/0_stateless/01322_any_input_optimize.sql
deleted file mode 100644
index 4c3345f4be4..00000000000
--- a/tests/queries/0_stateless/01322_any_input_optimize.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-SET optimize_move_functions_out_of_any = 1;
-
-EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);
-SELECT any(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-
-EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
-
-SET optimize_move_functions_out_of_any = 0;
-
-EXPLAIN SYNTAX SELECT any(number + number * 2) FROM numbers(1, 2);
-SELECT any(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN SYNTAX SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN SYNTAX WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-
-EXPLAIN SYNTAX SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
-
-SELECT 'arrayJoin';
-SELECT *, any(arrayJoin([[], []])) FROM numbers(1) GROUP BY number;
diff --git a/tests/queries/0_stateless/01398_any_with_alias.reference b/tests/queries/0_stateless/01398_any_with_alias.reference
deleted file mode 100644
index 4f8e72ef29c..00000000000
--- a/tests/queries/0_stateless/01398_any_with_alias.reference
+++ /dev/null
@@ -1,8 +0,0 @@
-"n"
-0
-SELECT any(number) * any(number) AS n
-FROM numbers(100)
-"n"
-0,0
-SELECT (any(number), any(number) * 2) AS n
-FROM numbers(100)
diff --git a/tests/queries/0_stateless/01398_any_with_alias.sql b/tests/queries/0_stateless/01398_any_with_alias.sql
deleted file mode 100644
index a65b8132c67..00000000000
--- a/tests/queries/0_stateless/01398_any_with_alias.sql
+++ /dev/null
@@ -1,7 +0,0 @@
-SET optimize_move_functions_out_of_any = 1;
-
-SELECT any(number * number) AS n FROM numbers(100) FORMAT CSVWithNames;
-EXPLAIN SYNTAX SELECT any(number * number) AS n FROM numbers(100);
-
-SELECT any((number, number * 2)) as n FROM numbers(100) FORMAT CSVWithNames;
-EXPLAIN SYNTAX SELECT any((number, number * 2)) as n FROM numbers(100);
diff --git a/tests/queries/0_stateless/01414_optimize_any_bug.sql b/tests/queries/0_stateless/01414_optimize_any_bug.sql
deleted file mode 100644
index ec24a09fc11..00000000000
--- a/tests/queries/0_stateless/01414_optimize_any_bug.sql
+++ /dev/null
@@ -1,19 +0,0 @@
-DROP TABLE IF EXISTS test;
-
-CREATE TABLE test
-(
- `Source.C1` Array(UInt64),
- `Source.C2` Array(UInt64)
-)
-ENGINE = MergeTree()
-ORDER BY tuple();
-
-SET enable_positional_arguments=0;
-SET optimize_move_functions_out_of_any = 1;
-
-SELECT any(arrayFilter((c, d) -> (4 = d), `Source.C1`, `Source.C2`)[1]) AS x
-FROM test
-WHERE 0
-GROUP BY 42;
-
-DROP TABLE test;
diff --git a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference
index 8c76b239991..1fb8df14afc 100644
--- a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference
+++ b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.reference
@@ -1,10 +1,8 @@
1
1
-1
other
google
1
-1
2
other
other
diff --git a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql
index 1e1d87a5ad5..91044859c1c 100644
--- a/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql
+++ b/tests/queries/0_stateless/01456_ast_optimizations_over_distributed.sql
@@ -1,11 +1,9 @@
-- Tags: distributed
-SET optimize_move_functions_out_of_any = 1;
SET optimize_injective_functions_inside_uniq = 1;
SET optimize_arithmetic_operations_in_aggregate_functions = 1;
SET optimize_if_transform_strings_to_enum = 1;
-SELECT any(number + 1) FROM numbers(1);
SELECT uniq(bitNot(number)) FROM numbers(1);
SELECT sum(number + 1) FROM numbers(1);
SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM numbers(1);
@@ -20,7 +18,6 @@ CREATE TABLE dist AS local_table ENGINE = Distributed(test_cluster_two_shards_lo
INSERT INTO local_table SELECT number FROM numbers(1);
-SELECT any(number + 1) FROM dist;
SELECT uniq(bitNot(number)) FROM dist;
SELECT sum(number + 1) FROM dist;
SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM dist;
diff --git a/tests/queries/0_stateless/01650_any_null_if.reference b/tests/queries/0_stateless/01650_any_null_if.reference
deleted file mode 100644
index e965047ad7c..00000000000
--- a/tests/queries/0_stateless/01650_any_null_if.reference
+++ /dev/null
@@ -1 +0,0 @@
-Hello
diff --git a/tests/queries/0_stateless/01650_any_null_if.sql b/tests/queries/0_stateless/01650_any_null_if.sql
deleted file mode 100644
index 17f57e92032..00000000000
--- a/tests/queries/0_stateless/01650_any_null_if.sql
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT any(nullIf(s, '')) FROM (SELECT arrayJoin(['', 'Hello']) AS s);
-
-SET optimize_move_functions_out_of_any = 0;
-EXPLAIN SYNTAX select any(nullIf('', ''), 'some text'); -- { serverError 42 }
-SET optimize_move_functions_out_of_any = 1;
-EXPLAIN SYNTAX select any(nullIf('', ''), 'some text'); -- { serverError 42 }
diff --git a/tests/queries/0_stateless/01414_optimize_any_bug.reference b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference
similarity index 100%
rename from tests/queries/0_stateless/01414_optimize_any_bug.reference
rename to tests/queries/0_stateless/01710_normal_projection_join_plan_fix.reference
diff --git a/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql
new file mode 100644
index 00000000000..40847a301c2
--- /dev/null
+++ b/tests/queries/0_stateless/01710_normal_projection_join_plan_fix.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+
+CREATE TABLE t1 (id UInt32, s String) Engine = MergeTree ORDER BY id;
+CREATE TABLE t2 (id1 UInt32, id2 UInt32) Engine = MergeTree ORDER BY id1 SETTINGS index_granularity = 1;
+INSERT INTO t2 SELECT number, number from numbers(100);
+ALTER TABLE t2 ADD PROJECTION proj (SELECT id2 ORDER BY id2);
+INSERT INTO t2 SELECT number, number from numbers(100);
+
+SELECT s FROM t1 as lhs LEFT JOIN (SELECT * FROM t2 WHERE id2 = 2) as rhs ON lhs.id = rhs.id2;
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
index 9016e731106..6adb2382a6f 100644
--- a/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
+++ b/tests/queries/0_stateless/01952_optimize_distributed_group_by_sharding_key.reference
@@ -19,10 +19,8 @@ explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2
Expression (Projection)
LimitBy
Union
- Expression (Before LIMIT BY)
- LimitBy
- Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))))
- ReadFromStorage (SystemNumbers)
+ Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY))))))
+ ReadFromStorage (SystemNumbers)
Expression
ReadFromRemote (Read from remote replica)
explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized
@@ -58,11 +56,10 @@ Expression (Projection)
Expression (Before LIMIT BY)
Sorting (Merge sorted streams for ORDER BY, without aggregation)
Union
- LimitBy
- Expression ((Before LIMIT BY + (Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part]))
- Sorting (Sorting for ORDER BY)
- Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
- ReadFromStorage (SystemNumbers)
+ Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))) [lifted up part])
+ Sorting (Sorting for ORDER BY)
+ Expression ((Before ORDER BY + (Convert VIEW subquery result to VIEW table structure + (Materialize constants after VIEW subquery + (Projection + Before ORDER BY)))))
+ ReadFromStorage (SystemNumbers)
ReadFromRemote (Read from remote replica)
explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized
Expression (Projection)
diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference
similarity index 100%
rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.reference
rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.reference
diff --git a/tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2 b/tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2
similarity index 100%
rename from tests/queries/0_stateless/02052_last_granula_adjust_LOGICAL_ERROR.sql.j2
rename to tests/queries/0_stateless/02052_last_granula_adjust_logical_error.sql.j2
diff --git a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh
index 9654d3146e2..ae2a2351c6b 100755
--- a/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh
+++ b/tests/queries/0_stateless/02266_protobuf_format_google_wrappers.sh
@@ -90,7 +90,8 @@ hexdump -C $BINARY_FILE_PATH
echo
echo "Decoded with protoc:"
-(cd $SCHEMADIR && $PROTOC_BINARY --decode Message "$PROTOBUF_FILE_NAME".proto) < $BINARY_FILE_PATH
+(cd $SCHEMADIR && $PROTOC_BINARY --proto_path=. --proto_path=/usr/share/clickhouse/protos --decode Message "$PROTOBUF_FILE_NAME".proto) < $BINARY_FILE_PATH
echo
echo "Proto message with wrapper for (NULL, 1), ('', 2), ('str', 3):"
diff --git a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference
index 379eea4dbbb..7bb0b965fbc 100644
--- a/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference
+++ b/tests/queries/0_stateless/02415_all_new_functions_must_be_documented.reference
@@ -320,6 +320,7 @@ geoDistance
geohashDecode
geohashEncode
geohashesInBox
+getClientHTTPHeader
getMacro
getOSKernelVersion
getServerPort
diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference
index 1f991703c7b..2ece1147d78 100644
--- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference
+++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.reference
@@ -1,4 +1,15 @@
1 rmt
-1 rmt1
2 rmt
+1 rmt1
2 rmt1
+0
+1 rmt
+2 rmt
+1 rmt1
+2 rmt1
+1 rmt2
+1 rmt2
+3 rmt2
+5 rmt2
+7 rmt2
+9 rmt2
diff --git a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql
index fbd90d8ab0f..52e8be236c8 100644
--- a/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql
+++ b/tests/queries/0_stateless/02486_truncate_and_unexpected_parts.sql
@@ -24,4 +24,30 @@ insert into rmt1 values (2);
system sync replica rmt;
system sync replica rmt1;
-select *, _table from merge(currentDatabase(), '') order by (*,), _table;
+select *, _table from merge(currentDatabase(), '') order by _table, (*,);
+select 0;
+
+create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02468/{database}2', '1') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0;
+
+system stop cleanup rmt;
+system stop merges rmt1;
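+-- max_block_size=1 makes every inserted row its own part, so partitions '0' and '1' of rmt each get several parts before the replace/move below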
+insert into rmt select * from numbers(10) settings max_block_size=1;
+system sync replica rmt1 lightweight;
+
+alter table rmt replace partition id '0' from rmt2;
+alter table rmt1 move partition id '1' to table rmt2;
+
+detach table rmt sync;
+detach table rmt1 sync;
+
+attach table rmt;
+attach table rmt1;
+
+insert into rmt values (1);
+insert into rmt1 values (2);
+system sync replica rmt;
+system sync replica rmt1;
+system sync replica rmt2;
+
+select *, _table from merge(currentDatabase(), '') order by _table, (*,);
diff --git a/tests/queries/0_stateless/02668_parse_datetime.reference b/tests/queries/0_stateless/02668_parse_datetime.reference
index f6c53ce1887..d21a51ce70c 100644
--- a/tests/queries/0_stateless/02668_parse_datetime.reference
+++ b/tests/queries/0_stateless/02668_parse_datetime.reference
@@ -243,3 +243,30 @@ select parseDateTime('12 AM'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH
select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
-- Fuzzer crash bug #53715
select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN }
+-- %h
+select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC');
+2022-08-13 19:58:32
+-- %l accepts single- or double-digit inputs
+select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC');
+2022-08-13 19:58:32
+select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC');
+2022-08-13 19:58:32
+-- %H
+select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %H:%i:%s', 'UTC');
+2022-08-13 07:58:32
+-- %k accepts single- or double-digit inputs
+select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %k:%i:%s', 'UTC');
+2022-08-13 07:58:32
+select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %k:%i:%s', 'UTC');
+2022-08-13 07:58:32
+-- %m
+select parseDateTime('8 13, 2022, 7:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC');
+2022-08-13 07:58:32
+-- %c accepts single- or double-digit inputs
+select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC');
+2022-08-13 07:58:32
+select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC');
+2022-08-13 07:58:32
diff --git a/tests/queries/0_stateless/02668_parse_datetime.sql b/tests/queries/0_stateless/02668_parse_datetime.sql
index d8f2a94e188..02ac0c5f35c 100644
--- a/tests/queries/0_stateless/02668_parse_datetime.sql
+++ b/tests/queries/0_stateless/02668_parse_datetime.sql
@@ -168,4 +168,23 @@ select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverE
-- Fuzzer crash bug #53715
select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN }
+-- %h
+select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC');
+-- %l accepts single- or double-digit inputs
+select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC');
+select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC');
+-- %H
+select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %H:%i:%s', 'UTC');
+-- %k accepts single- or double-digit inputs
+select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %k:%i:%s', 'UTC');
+select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %k:%i:%s', 'UTC');
+-- %m
+select parseDateTime('8 13, 2022, 7:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC');
+-- %c accepts single- or double-digit inputs
+select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC');
+select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC');
+
-- { echoOff }
diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference
new file mode 100644
index 00000000000..9fa5612e7cd
--- /dev/null
+++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.reference
@@ -0,0 +1,314 @@
+SELECT * FROM 02763_merge_log_1 ORDER BY x
+1 11
+2 12
+3 13
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x
+1 11
+1 11
+1 11
+1 11
+2 12
+2 12
+2 12
+2 12
+3 13
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+4 14
+SETTINGS optimize_move_to_prewhere= 0
+SELECT * FROM 02763_merge_log_1
+3 13
+SELECT * FROM merge(currentDatabase(), 02763_merge_log_1)
+3 13
+SELECT * FROM merge(currentDatabase(), 02763_merge_log)
+1 11
+2 12
+3 13
+3 13
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2
+3 13
+3 13
+4 14
+SELECT * FROM 02763_merge_merge_1
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1)
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge)
+1 11
+2 12
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2
+3 13
+4 14
+4 14
+SELECT * FROM engine_merge_12 WHERE x>2
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge)
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+aaa 6 39
+aaa 6 39
+aaa 6 39
+aaa 8 42
+aaa 8 42
+aaa 8 42
+3
+3
+3
+4
+4
+4
+SELECT * FROM merge(...) LEFT JOIN merge(...)
+3 13 13
+3 13 13
+4 14 14
+4 14 14
+SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...)
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+SELECT x, SUM(y) FROM (SELECT * FROM merge(...) UNION ALL ...) GROUP BY x
+1 22
+2 24
+3 39
+4 42
+1 11 0
+2 12 0
+3 13 0
+4 14 1
+4 14 1
+SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12
+2 12
+3 13
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12
+2 12
+3 13
+3 13
+4 14
+4 14
+2 12 0
+3 13 1
+3 13 1
+4 14 1
+4 14 1
+SELECT y from merge(currentDatabase(), 02763_merge)
+11
+11
+12
+12
+13
+13
+13
+13
+14
+14
+14
+14
+02763_merge_fancycols
+SELECT *
+SELECT x, lc
+SELECT *
+1 11 111 111 42
+1 11 111 111 42
+SELECT x, lc
+1 111
+1 111
+SELECT x, lc, cnst
+1 111 42
+1 111 42
+SELECT x, y from merge(currentDatabase(), 02763_merge)
+1 11
+1 11
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+4 14
+SETTINGS optimize_move_to_prewhere= 1
+SELECT * FROM 02763_merge_log_1
+3 13
+SELECT * FROM merge(currentDatabase(), 02763_merge_log_1)
+3 13
+SELECT * FROM merge(currentDatabase(), 02763_merge_log)
+1 11
+2 12
+3 13
+3 13
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2
+3 13
+3 13
+4 14
+SELECT * FROM 02763_merge_merge_1
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1)
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge)
+1 11
+2 12
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2
+3 13
+4 14
+4 14
+SELECT * FROM engine_merge_12 WHERE x>2
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge)
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+aaa 6 39
+aaa 6 39
+aaa 6 39
+aaa 8 42
+aaa 8 42
+aaa 8 42
+3
+3
+3
+4
+4
+4
+SELECT * FROM merge(...) LEFT JOIN merge(...)
+3 13 13
+3 13 13
+4 14 14
+4 14 14
+SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...)
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+SELECT x, SUM(y) FROM (SELECT * FROM merge(...) UNION ALL ...) GROUP BY x
+1 22
+2 24
+3 39
+4 42
+1 11 0
+2 12 0
+3 13 0
+4 14 1
+4 14 1
+SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12
+2 12
+3 13
+3 13
+4 14
+4 14
+SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12
+2 12
+3 13
+3 13
+4 14
+4 14
+2 12 0
+3 13 1
+3 13 1
+4 14 1
+4 14 1
+SELECT y from merge(currentDatabase(), 02763_merge)
+11
+11
+12
+12
+13
+13
+13
+13
+14
+14
+14
+14
+02763_merge_fancycols
+SELECT *
+SELECT x, lc
+SELECT *
+1 11 111 111 42
+1 11 111 111 42
+SELECT x, lc
+1 111
+1 111
+SELECT x, lc, cnst
+1 111 42
+1 111 42
+SELECT x, y from merge(currentDatabase(), 02763_merge)
+1 11
+1 11
+1 11
+1 11
+2 12
+2 12
+3 13
+3 13
+3 13
+3 13
+4 14
+4 14
+4 14
+4 14
diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2
new file mode 100644
index 00000000000..0263e1a974f
--- /dev/null
+++ b/tests/queries/0_stateless/02763_row_policy_storage_merge.sql.j2
@@ -0,0 +1,145 @@
+DROP TABLE IF EXISTS 02763_merge_log_1;
+DROP TABLE IF EXISTS 02763_merge_log_2;
+DROP TABLE IF EXISTS 02763_merge_merge_1;
+DROP TABLE IF EXISTS 02763_merge_merge_2;
+DROP TABLE IF EXISTS 02763_merge_fancycols;
+DROP ROW POLICY IF EXISTS 02763_filter_1 ON 02763_merge_log_1;
+DROP ROW POLICY IF EXISTS 02763_filter_2 ON 02763_merge_merge_1;
+DROP ROW POLICY IF EXISTS 02763_filter_3 ON 02763_merge_log_1;
+DROP ROW POLICY IF EXISTS 02763_filter_4 ON 02763_merge_merge_1;
+DROP ROW POLICY IF EXISTS 02763_filter_5 ON 02763_merge_fancycols;
+DROP ROW POLICY IF EXISTS 02763_filter_6 ON 02763_merge_fancycols;
+
+
+CREATE TABLE 02763_merge_log_1 (x UInt8, y UInt64) ENGINE = Log;
+CREATE TABLE 02763_merge_log_2 (x UInt8, y UInt64) ENGINE = Log;
+
+CREATE TABLE 02763_merge_merge_1 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x;
+CREATE TABLE 02763_merge_merge_2 (x UInt8, y UInt64) ENGINE = MergeTree ORDER BY x;
+
+CREATE TABLE 02763_engine_merge_12 (x UInt8, y UInt64) ENGINE = Merge(currentDatabase(), '02763_merge_merge');
+
+INSERT INTO 02763_merge_log_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14);
+INSERT INTO 02763_merge_log_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14);
+INSERT INTO 02763_merge_merge_1 VALUES (1, 11), (2, 12), (3, 13), (4, 14);
+INSERT INTO 02763_merge_merge_2 VALUES (1, 11), (2, 12), (3, 13), (4, 14);
+
+SELECT 'SELECT * FROM 02763_merge_log_1 ORDER BY x';
+SELECT * FROM 02763_merge_log_1 ORDER BY x;
+
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) ORDER BY x';
+SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x;
+
+
+{% for prew in [0 , 1] -%}
+
+SELECT 'SETTINGS optimize_move_to_prewhere= {{prew}}';
+
+CREATE ROW POLICY 02763_filter_1 ON 02763_merge_log_1 USING x=3 AS permissive TO ALL;
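+-- A permissive policy restricts reads to matching rows: only x=3 is now visible through 02763_merge_log_1, directly or via merge()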
+
+SELECT 'SELECT * FROM 02763_merge_log_1';
+SELECT * FROM 02763_merge_log_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log_1)';
+SELECT * FROM merge(currentDatabase(), '02763_merge_log_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log)';
+SELECT * FROM merge(currentDatabase(), '02763_merge_log') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>2';
+SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+CREATE ROW POLICY 02763_filter_2 ON 02763_merge_merge_1 USING x=4 AS permissive TO ALL;
+
+SELECT 'SELECT * FROM 02763_merge_merge_1';
+SELECT * FROM 02763_merge_merge_1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge_1)';
+SELECT * FROM merge(currentDatabase(), '02763_merge_merge_1') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge)';
+SELECT * FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>2';
+SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+
+SELECT 'SELECT * FROM engine_merge_12 WHERE x>2';
+SELECT * FROM 02763_engine_merge_12 WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge)';
+SELECT * FROM merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge) WHERE x>2';
+SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT 'aaa', x*2 as x_2, y*3 as y_3 FROM merge(currentDatabase(), '02763_merge') WHERE x>2 ORDER BY x_2 SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT x FROM (SELECT * FROM merge(currentDatabase(), '02763_merge') WHERE x IN (3,4)) ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT 'SELECT * FROM merge(...) LEFT JOIN merge(...)';
+SELECT * FROM merge(currentDatabase(), '02763_merge.*1') as a
+LEFT JOIN
+merge(currentDatabase(), '02763_merge.*2') as b
+USING (x)
+ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT 'SELECT * FROM merge(...) UNION ALL SELECT * FROM merge(...)';
+SELECT * FROM
+(
+SELECT * FROM merge(currentDatabase(), '02763_merge.*1')
+UNION ALL
+SELECT * FROM merge(currentDatabase(), '02763_merge.*2')
+)
+ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT 'SELECT x, SUM(y) FROM (SELECT * FROM merge(...) UNION ALL ...) GROUP BY x';
+SELECT x, SUM(y) FROM
+(SELECT * FROM merge(currentDatabase(), '02763_merge.*1')
+UNION ALL
+SELECT * FROM merge(currentDatabase(), '02763_merge.*2'))
+GROUP BY x
+ORDER BY x;
+
+SELECT *, x=4 FROM merge(currentDatabase(), '02763_merge_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+CREATE ROW POLICY 02763_filter_3 ON 02763_merge_log_1 USING y>12 AS permissive TO ALL;
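+-- A second permissive policy on the same table is OR-ed with the first: log_1 now exposes rows with x=3 OR y>12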
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_log) WHERE x>1 -- with y>12';
+SELECT * FROM merge(currentDatabase(), '02763_merge_log') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+CREATE ROW POLICY 02763_filter_4 ON 02763_merge_merge_1 USING y>12 AS permissive TO ALL;
+SELECT 'SELECT * FROM merge(currentDatabase(), 02763_merge_merge) WHERE x>1 -- with y>12';
+SELECT * FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT *, (x=4 OR y>12) FROM merge(currentDatabase(), '02763_merge_merge') WHERE x>1 ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT 'SELECT y from merge(currentDatabase(), 02763_merge)';
+SELECT y from merge(currentDatabase(), '02763_merge') ORDER BY y SETTINGS optimize_move_to_prewhere= {{prew}};
+
+SELECT '02763_merge_fancycols';
+CREATE TABLE 02763_merge_fancycols (x UInt8, y Nullable(UInt64), z String DEFAULT CONCAT(toString(x), toString(y)), lc LowCardinality(String) DEFAULT z, cnst UInt32 MATERIALIZED 42) ENGINE = MergeTree() ORDER BY tuple();
+INSERT INTO 02763_merge_fancycols (x, y) SELECT x, y from merge(currentDatabase(), '02763_merge');
+
+CREATE ROW POLICY 02763_filter_5 ON 02763_merge_fancycols USING cnst<>42 AS permissive TO ALL;
+SELECT 'SELECT *';
+SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x, lc';
+SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+CREATE ROW POLICY 02763_filter_6 ON 02763_merge_fancycols USING lc='111' AS permissive TO ALL;
+SELECT 'SELECT *';
+SELECT * from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x, lc';
+SELECT x, lc from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x, lc, cnst';
+SELECT x, lc, cnst from merge(currentDatabase(), '02763_merge_fancycols') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x, y from merge(currentDatabase(), 02763_merge)';
+SELECT x, y from merge(currentDatabase(), '02763_merge') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+DROP TABLE 02763_merge_fancycols;
+
+DROP ROW POLICY 02763_filter_1 ON 02763_merge_log_1;
+DROP ROW POLICY 02763_filter_2 ON 02763_merge_merge_1;
+
+DROP ROW POLICY 02763_filter_3 ON 02763_merge_log_1;
+DROP ROW POLICY 02763_filter_4 ON 02763_merge_merge_1;
+
+DROP ROW POLICY 02763_filter_5 ON 02763_merge_fancycols;
+DROP ROW POLICY 02763_filter_6 ON 02763_merge_fancycols;
+
+{% endfor %}
diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference
new file mode 100644
index 00000000000..56bfdbe0b18
--- /dev/null
+++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.reference
@@ -0,0 +1,49 @@
+02763_merge_aliases
+x, y, z FROM 02763_a_merge
+3 13 16
+4 14 18
+* FROM 02763_a_merge
+3 13 16
+4 14 18
+x, y FROM 02763_a_merge
+3 13
+4 14
+SELECT x, y FROM merge(currentDatabase(), 02763_alias)
+3 13
+4 14
+SELECT x, y FROM merge(currentDatabase(), 02763_alias)
+2 12
+3 13
+4 14
+SELECT y FROM merge(currentDatabase(), 02763_alias)
+12
+13
+14
+SELECT x FROM merge(currentDatabase(), 02763_alias)
+2
+3
+4
+x, y, z FROM 02763_a_merge
+3 13 16
+4 14 18
+* FROM 02763_a_merge
+3 13 16
+4 14 18
+x, y FROM 02763_a_merge
+3 13
+4 14
+SELECT x, y FROM merge(currentDatabase(), 02763_alias)
+3 13
+4 14
+SELECT x, y FROM merge(currentDatabase(), 02763_alias)
+2 12
+3 13
+4 14
+SELECT y FROM merge(currentDatabase(), 02763_alias)
+12
+13
+14
+SELECT x FROM merge(currentDatabase(), 02763_alias)
+2
+3
+4
diff --git a/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2 b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2
new file mode 100644
index 00000000000..bdd456951dd
--- /dev/null
+++ b/tests/queries/0_stateless/02763_row_policy_storage_merge_alias.sql.j2
@@ -0,0 +1,42 @@
+DROP TABLE IF EXISTS 02763_alias;
+DROP TABLE IF EXISTS 02763_a_merge;
+
+
+SELECT '02763_merge_aliases';
+CREATE TABLE 02763_alias (x UInt8, y UInt64, z UInt64 ALIAS plus(x,y)) ENGINE = MergeTree ORDER BY x;
+INSERT INTO 02763_alias VALUES (1, 11), (2, 12), (3, 13), (4, 14);
+
+CREATE ROW POLICY 02763_filter_7 ON 02763_alias USING z>15 AS permissive TO ALL;
+
+CREATE TABLE 02763_a_merge (x UInt8, y UInt64, z UInt64) ENGINE = Merge(currentDatabase(), '02763_alias');
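+-- z is an ALIAS column in 02763_alias but a real column in the Merge table; the z>15 policy must filter in both cases (only rows with x+y>15, i.e. 3 and 4, survive)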
+
+{% for prew in [0 , 1] -%}
+
+
+
+SELECT 'x, y, z FROM 02763_a_merge';
+SELECT x, y, z FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT '* FROM 02763_a_merge';
+SELECT * FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'x, y FROM 02763_a_merge';
+SELECT x, y FROM 02763_a_merge ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)';
+SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+CREATE ROW POLICY 02763_filter_8 ON 02763_alias USING y>11 AS permissive TO ALL;
+
+SELECT 'SELECT x, y FROM merge(currentDatabase(), 02763_alias)';
+SELECT x, y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT y FROM merge(currentDatabase(), 02763_alias)';
+SELECT y FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+SELECT 'SELECT x FROM merge(currentDatabase(), 02763_alias)';
+SELECT x FROM merge(currentDatabase(), '02763_alias') ORDER BY x SETTINGS optimize_move_to_prewhere= {{prew}};
+
+DROP ROW POLICY 02763_filter_8 ON 02763_alias;
+{% endfor %}
+
+DROP TABLE 02763_alias;
+DROP TABLE 02763_a_merge;
+
+DROP ROW POLICY 02763_filter_7 ON 02763_alias;
diff --git a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference b/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference
deleted file mode 100644
index 025c04af1da..00000000000
--- a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.reference
+++ /dev/null
@@ -1,124 +0,0 @@
--- { echoOn }
-SET optimize_move_functions_out_of_any = 1;
-EXPLAIN QUERY TREE SELECT any(number + number * 2) FROM numbers(1, 2);
-QUERY id: 0
- PROJECTION COLUMNS
- any(plus(number, multiply(number, 2))) UInt64
- PROJECTION
- LIST id: 1, nodes: 1
- FUNCTION id: 2, function_name: plus, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 3, nodes: 2
- FUNCTION id: 4, function_name: any, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 5, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- FUNCTION id: 8, function_name: multiply, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 9, nodes: 2
- FUNCTION id: 10, function_name: any, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 11, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- CONSTANT id: 12, constant_value: UInt64_2, constant_value_type: UInt8
- JOIN TREE
- TABLE_FUNCTION id: 7, table_function_name: numbers
- ARGUMENTS
- LIST id: 13, nodes: 2
- CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8
- CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8
-SELECT any(number + number * 2) FROM numbers(1, 2);
-3
-EXPLAIN QUERY TREE SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-QUERY id: 0
- PROJECTION COLUMNS
- anyLast(plus(number, multiply(number, 2))) UInt64
- PROJECTION
- LIST id: 1, nodes: 1
- FUNCTION id: 2, function_name: plus, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 3, nodes: 2
- FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 5, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- FUNCTION id: 8, function_name: multiply, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 9, nodes: 2
- FUNCTION id: 10, function_name: anyLast, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 11, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- CONSTANT id: 12, constant_value: UInt64_2, constant_value_type: UInt8
- JOIN TREE
- TABLE_FUNCTION id: 7, table_function_name: numbers
- ARGUMENTS
- LIST id: 13, nodes: 2
- CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8
- CONSTANT id: 15, constant_value: UInt64_2, constant_value_type: UInt8
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-6
-EXPLAIN QUERY TREE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-QUERY id: 0
- PROJECTION COLUMNS
- x UInt64
- PROJECTION
- LIST id: 1, nodes: 1
- FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 3, nodes: 2
- FUNCTION id: 4, function_name: any, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 5, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8
- JOIN TREE
- TABLE_FUNCTION id: 7, table_function_name: numbers
- ARGUMENTS
- LIST id: 9, nodes: 2
- CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
- CONSTANT id: 11, constant_value: UInt64_2, constant_value_type: UInt8
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-3
-EXPLAIN QUERY TREE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-QUERY id: 0
- PROJECTION COLUMNS
- x UInt64
- x UInt64
- PROJECTION
- LIST id: 1, nodes: 2
- FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 3, nodes: 2
- FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 5, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8
- FUNCTION id: 2, function_name: multiply, function_type: ordinary, result_type: UInt64
- ARGUMENTS
- LIST id: 3, nodes: 2
- FUNCTION id: 4, function_name: anyLast, function_type: aggregate, result_type: UInt64
- ARGUMENTS
- LIST id: 5, nodes: 1
- COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
- CONSTANT id: 8, constant_value: UInt64_3, constant_value_type: UInt8
- JOIN TREE
- TABLE_FUNCTION id: 7, table_function_name: numbers
- ARGUMENTS
- LIST id: 9, nodes: 2
- CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
- CONSTANT id: 11, constant_value: UInt64_2, constant_value_type: UInt8
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-6 6
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
-SET optimize_move_functions_out_of_any = 0;
-SELECT any(number + number * 2) FROM numbers(1, 2);
-3
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-6
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-3
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-6 6
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
diff --git a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql b/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql
deleted file mode 100644
index c9707d10fde..00000000000
--- a/tests/queries/0_stateless/02813_analyzer_push_any_to_functions.sql
+++ /dev/null
@@ -1,33 +0,0 @@
-SET allow_experimental_analyzer = 1;
-
--- { echoOn }
-SET optimize_move_functions_out_of_any = 1;
-
-EXPLAIN QUERY TREE SELECT any(number + number * 2) FROM numbers(1, 2);
-SELECT any(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN QUERY TREE SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-
-EXPLAIN QUERY TREE WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-
-EXPLAIN QUERY TREE SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
-
-
-
-SET optimize_move_functions_out_of_any = 0;
-
-SELECT any(number + number * 2) FROM numbers(1, 2);
-
-SELECT anyLast(number + number * 2) FROM numbers(1, 2);
-
-WITH any(number * 3) AS x SELECT x FROM numbers(1, 2);
-
-SELECT anyLast(number * 3) AS x, x FROM numbers(1, 2);
-
-SELECT any(anyLast(number)) FROM numbers(1); -- { serverError 184 }
--- { echoOff }
diff --git a/tests/queries/0_stateless/02888_obsolete_settings.reference b/tests/queries/0_stateless/02888_obsolete_settings.reference
index 63553092c0c..378a5c7c389 100644
--- a/tests/queries/0_stateless/02888_obsolete_settings.reference
+++ b/tests/queries/0_stateless/02888_obsolete_settings.reference
@@ -40,6 +40,7 @@ multiple_joins_rewriter_version
odbc_max_field_size
optimize_duplicate_order_by_and_distinct
optimize_fuse_sum_count_avg
+optimize_move_functions_out_of_any
parallel_replicas_min_number_of_granules_to_enable
partial_merge_join_optimizations
query_cache_store_results_of_queries_with_nondeterministic_functions
diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference
index 9289ddcee34..4598404dd40 100644
--- a/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference
+++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.reference
@@ -1 +1,10 @@
+-- { echoOn }
+SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS }
+SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1;
121
+SET optimize_trivial_approximate_count_query = 1;
+-- needs more data to see total_bytes, or just detach and attach the table
+DETACH TABLE dict SYNC;
+ATTACH TABLE dict;
+SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV;
+121,1
diff --git a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql
index 0cdf2d1b2b2..a770b153760 100644
--- a/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql
+++ b/tests/queries/0_stateless/02892_rocksdb_trivial_count.sql
@@ -2,5 +2,11 @@
CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key;
INSERT INTO dict SELECT number, toString(number) FROM numbers(121);
+-- { echoOn }
SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS }
SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1;
+SET optimize_trivial_approximate_count_query = 1;
+-- needs more data to see total_bytes, or just detach and attach the table
+DETACH TABLE dict SYNC;
+ATTACH TABLE dict;
+SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV;
diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.reference b/tests/queries/0_stateless/02899_distributed_limit_by.reference
new file mode 100644
index 00000000000..c20ecbcc4e4
--- /dev/null
+++ b/tests/queries/0_stateless/02899_distributed_limit_by.reference
@@ -0,0 +1,52 @@
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=1
+0
+0
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=0
+0
+0
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=1
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=1,distributed_push_down_limit=0
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=1
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=2,distributed_push_down_limit=0
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=1
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=0,distributed_push_down_limit=0
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=1
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=1,distributed_push_down_limit=0
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=1
+0
+0
+0
+0
+Used settings: prefer_localhost_replica=1,distributed_group_by_no_merge=2,distributed_push_down_limit=0
+0
+0
+0
+0
diff --git a/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2 b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2
new file mode 100644
index 00000000000..4f885ef2b6c
--- /dev/null
+++ b/tests/queries/0_stateless/02899_distributed_limit_by.sql.j2
@@ -0,0 +1,31 @@
+{#
+Randomize settings:
+- prefer_localhost_replica
+- distributed_group_by_no_merge (0 = WithMergeableState, 1 = Complete, 2 = WithMergeableStateAfterAggregation/WithMergeableStateAfterAggregationAndLimit)
+- distributed_push_down_limit (0/1 = disallows/allows WithMergeableStateAfterAggregationAndLimit)
+#}
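+{#
+For illustration, the first settings combination below expands to:
+  select 'Used settings: prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=1';
+  select dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by dummy + dummy + 0 as l settings prefer_localhost_replica=0,distributed_group_by_no_merge=0,distributed_push_down_limit=1;
+#}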
+{% for settings in product(
+ [
+ 'prefer_localhost_replica=0',
+ 'prefer_localhost_replica=1',
+ ],
+ [
+ 'distributed_group_by_no_merge=0',
+ 'distributed_group_by_no_merge=1',
+ 'distributed_group_by_no_merge=2',
+ ],
+ [
+ 'distributed_push_down_limit=1',
+ 'distributed_push_down_limit=0',
+ ],
+) %}
+{% set settings = settings | join(',') %}
+select 'Used settings: {{ settings }}';
+select dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by dummy + dummy + 0 as l settings {{ settings }};
+select dummy from (select dummy + dummy + 0 as l, dummy from remote('127.{1,1}', system.one) where dummy + dummy >= 0 limit 1 by l) settings {{ settings }};
+{% endfor %}
diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.reference b/tests/queries/0_stateless/02900_limit_by_query_stage.reference
new file mode 100644
index 00000000000..b01fb1ca5b0
--- /dev/null
+++ b/tests/queries/0_stateless/02900_limit_by_query_stage.reference
@@ -0,0 +1,3 @@
+0 0
+0 0
+0 0
diff --git a/tests/queries/0_stateless/02900_limit_by_query_stage.sh b/tests/queries/0_stateless/02900_limit_by_query_stage.sh
new file mode 100755
index 00000000000..d34d0d81bcd
--- /dev/null
+++ b/tests/queries/0_stateless/02900_limit_by_query_stage.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
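+# Run the same LIMIT BY query at three intermediate query-processing stages; each invocation should return a single '0 0' row.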
+$CLICKHOUSE_CLIENT --stage with_mergeable_state --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l'
+$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l'
+$CLICKHOUSE_CLIENT --stage with_mergeable_state_after_aggregation_and_limit --query 'SELECT dummy FROM system.one WHERE (dummy + dummy) >= 0 LIMIT 1 BY (dummy + dummy) + 0 AS l'
diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
index 074a3a6725e..095239954f4 100755
--- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
+++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh
@@ -10,7 +10,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# (i.e. "No active replica has part X or covering part")
# does not appear as an error (level=Error), only as an info message (level=Information).
-$CLICKHOUSE_CLIENT -nm -q "
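+# When the suite runs with a replicated database, the ON CLUSTER statement below must target test_cluster_database_replicated instead of 'default'.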
+cluster=default
+if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where cluster = 'test_cluster_database_replicated'") = 1 ]]; then
+ cluster=test_cluster_database_replicated
+fi
+
+$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q "
drop table if exists rmt1;
drop table if exists rmt2;
@@ -21,7 +27,12 @@ $CLICKHOUSE_CLIENT -nm -q "
insert into rmt1 values (2);
system sync replica rmt1;
- system stop pulling replication log rmt2;
+    -- SYSTEM STOP PULLING REPLICATION LOG does not wait for the current pull,
+    -- so trigger one explicitly to avoid a race (though the proper fix would be to wait
+    -- for the current pull in StorageReplicatedMergeTree::getActionLock())
+ system sync replica rmt2;
+    -- NOTE: qualifying the table with $CLICKHOUSE_DATABASE is required here
+ system stop pulling replication log on cluster $cluster $CLICKHOUSE_DATABASE.rmt2;
optimize table rmt1 final settings alter_sync=0, optimize_throw_if_noop=1;
" || exit 1
diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql
new file mode 100644
index 00000000000..84250059c58
--- /dev/null
+++ b/tests/queries/0_stateless/02910_replicated_with_simple_aggregate_column.sql
@@ -0,0 +1,18 @@
+CREATE TABLE t_r1
+(
+ `id` UInt64,
+ `val` SimpleAggregateFunction(max, Nullable(String))
+)
+ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r1')
+ORDER BY id
+SETTINGS index_granularity = 8192;
+
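+-- Declaring the same replicated table path with a different SimpleAggregateFunction (anyLast vs max) must be rejected: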
+CREATE TABLE t_r2
+(
+ `id` UInt64,
+ `val` SimpleAggregateFunction(anyLast, Nullable(String))
+)
+ENGINE = ReplicatedAggregatingMergeTree('/tables/{database}/t', 'r2')
+ORDER BY id
+SETTINGS index_granularity = 8192; -- { serverError INCOMPATIBLE_COLUMNS }
diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.reference b/tests/queries/0_stateless/02910_rocksdb_optimize.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02910_rocksdb_optimize.sql b/tests/queries/0_stateless/02910_rocksdb_optimize.sql
new file mode 100644
index 00000000000..575ba6db212
--- /dev/null
+++ b/tests/queries/0_stateless/02910_rocksdb_optimize.sql
@@ -0,0 +1,5 @@
+-- Tags: use-rocksdb
+
+CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key;
+INSERT INTO dict SELECT number, toString(number) FROM numbers(1e3);
+OPTIMIZE TABLE dict;
diff --git a/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference
new file mode 100644
index 00000000000..61effdb19c4
--- /dev/null
+++ b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.reference
@@ -0,0 +1,13 @@
+value
+value1 value2
+value1 value1 value2
+NOT-FOUND-KEY is not in HTTP request headers
+FORBIDDEN-KEY1 is in get_client_http_header_forbidden_headers
+1 row1_value1 row1_value2 row1_value3 row1_value4 row1_value5 row1_value6 row1_value7
+2 row2_value1 row2_value2 row2_value3 row2_value4 row2_value5 row2_value6 row2_value7
+3
+value_from_query_1 value_from_query_2 value_from_query_3 1 row1_value1 row1_value2 row1_value3 row1_value4 row1_value5 row1_value6 row1_value7
+value_from_query_1 value_from_query_2 value_from_query_3 2 row2_value1 row2_value2 row2_value3 row2_value4 row2_value5 row2_value6 row2_value7
+value_from_query_1 value_from_query_2 value_from_query_3 3
+http_value1
+http_value2
diff --git a/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh
new file mode 100755
index 00000000000..505e017ee5d
--- /dev/null
+++ b/tests/queries/0_stateless/02911_getHTTPHeaderFuncion.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+echo "SELECT getClientHTTPHeader('key')" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' -H 'key: value' 'http://localhost:8123/' -d @-
+
+echo "SELECT getClientHTTPHeader('key1'), getClientHTTPHeader('key2')" | curl -s -H 'X-Clickhouse-User: default' \
+ -H 'X-ClickHouse-Key: ' -H 'key1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @-
+
+echo "SELECT getClientHTTPHeader('test-' || 'key' || '-1'), getClientHTTPHeader('test-key-1'), getClientHTTPHeader('key2')" | curl -s -H 'X-Clickhouse-User: default' \
+ -H 'X-ClickHouse-Key: ' -H 'test-key-1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @-
+
+#Code: 36. DB::Exception: NOT-FOUND-KEY is not in HTTP request headers
+echo "SELECT getClientHTTPHeader('NOT-FOUND-KEY')"| curl -s -H 'X-Clickhouse-User: default' \
+ -H 'X-ClickHouse-Key: ' -H 'key1: value1' -H 'key2: value2' 'http://localhost:8123/' -d @- | grep -o -e "NOT-FOUND-KEY is not in HTTP request headers"
+
+#Code: 36. DB::Exception: FORBIDDEN-KEY1 is in get_client_http_header_forbidden_headers, which can be configured in the server config file.
+echo "SELECT getClientHTTPHeader('FORBIDDEN-KEY1')" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \
+    -H 'FORBIDDEN-KEY1: forbidden1' 'http://localhost:8123/' -d @- | grep -o -e "FORBIDDEN-KEY1 is in get_client_http_header_forbidden_headers"
+
+db_name=${CLICKHOUSE_DATABASE}
+
+$CLICKHOUSE_CLIENT -q "CREATE DATABASE IF NOT EXISTS ${db_name};"
+
+$CLICKHOUSE_CLIENT -q "CREATE TABLE ${db_name}.02884_get_http_header
+ (id UInt32,
+ http_key1 String DEFAULT getClientHTTPHeader('http_header_key1'),
+ http_key2 String DEFAULT getClientHTTPHeader('http_header_key2'),
+ http_key3 String DEFAULT getClientHTTPHeader('http_header_key3'),
+ http_key4 String DEFAULT getClientHTTPHeader('http_header_key4'),
+ http_key5 String DEFAULT getClientHTTPHeader('http_header_key5'),
+ http_key6 String DEFAULT getClientHTTPHeader('http_header_key6'),
+ http_key7 String DEFAULT getClientHTTPHeader('http_header_key7')
+ )
+ Engine=MergeTree()
+ ORDER BY id"
+
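+# The DEFAULT expressions read the headers of the request performing the INSERT; inserts over the native protocol leave them empty (see id 3 below).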
+#Insert data via http request
+echo "INSERT INTO ${db_name}.02884_get_http_header (id) values (1)" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \
+ -H 'http_header_key1: row1_value1'\
+ -H 'http_header_key2: row1_value2'\
+ -H 'http_header_key3: row1_value3'\
+ -H 'http_header_key4: row1_value4'\
+ -H 'http_header_key5: row1_value5'\
+ -H 'http_header_key6: row1_value6'\
+ -H 'http_header_key7: row1_value7' 'http://localhost:8123/' -d @-
+
+echo "INSERT INTO ${db_name}.02884_get_http_header (id) values (2)" | curl -s -H 'X-ClickHouse-User: default' -H 'X-ClickHouse-Key: ' \
+ -H 'http_header_key1: row2_value1'\
+ -H 'http_header_key2: row2_value2'\
+ -H 'http_header_key3: row2_value3'\
+ -H 'http_header_key4: row2_value4'\
+ -H 'http_header_key5: row2_value5'\
+ -H 'http_header_key6: row2_value6'\
+ -H 'http_header_key7: row2_value7' 'http://localhost:8123/' -d @-
+
+$CLICKHOUSE_CLIENT -q "SELECT id, http_key1, http_key2, http_key3, http_key4, http_key5, http_key6, http_key7 FROM ${db_name}.02884_get_http_header ORDER BY id;"
+#Insert data via tcp client
+$CLICKHOUSE_CLIENT --param_db="$db_name" -q "INSERT INTO ${db_name}.02884_get_http_header (id) values (3)"
+$CLICKHOUSE_CLIENT --param_db="$db_name" -q "SELECT * FROM ${db_name}.02884_get_http_header where id = 3"
+
+echo "SELECT getClientHTTPHeader('key_from_query_1'), getClientHTTPHeader('key_from_query_2'), getClientHTTPHeader('key_from_query_3'), * FROM ${db_name}.02884_get_http_header ORDER BY id" | curl -s -H 'X-Clickhouse-User: default' \
+ -H 'X-ClickHouse-Key: ' -H 'key_from_query_1: value_from_query_1' -H 'key_from_query_2: value_from_query_2' -H 'key_from_query_3: value_from_query_3' 'http://localhost:8123/' -d @-
+
+$CLICKHOUSE_CLIENT -q "DROP TABLE IF EXISTS ${db_name}.02884_get_http_header"
+
+$CLICKHOUSE_CLIENT -q "CREATE TABLE IF NOT EXISTS ${db_name}.02884_header_from_table (header_name String) Engine=Memory"
+$CLICKHOUSE_CLIENT -q "INSERT INTO ${db_name}.02884_header_from_table values ('http_key1'), ('http_key2')"
+
+echo "SELECT getClientHTTPHeader(header_name) as value from (select * FROM ${db_name}.02884_header_from_table) order by value" | curl -s -H 'X-Clickhouse-User: default' \
+ -H 'X-ClickHouse-Key: ' -H 'http_key1: http_value1' -H 'http_key2: http_value2' 'http://localhost:8123/' -d @-
+
+$CLICKHOUSE_CLIENT -q "DROP DATABASE ${db_name}"
diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference
new file mode 100644
index 00000000000..b5c035d8576
--- /dev/null
+++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.reference
@@ -0,0 +1,2 @@
+[(0,0)]
+[(1,1)]
diff --git a/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql
new file mode 100644
index 00000000000..b4eb1b4aff4
--- /dev/null
+++ b/tests/queries/0_stateless/02915_analyzer_fuzz_6.sql
@@ -0,0 +1,19 @@
+set allow_suspicious_low_cardinality_types=1;
+set allow_experimental_analyzer=1;
+
+create table tab (x LowCardinality(Nullable(Float64))) engine = MergeTree order by x settings allow_nullable_key=1;
+insert into tab select number from numbers(2);
+SELECT [(arrayJoin([x]), x)] AS row FROM tab;
+
+
+CREATE TABLE t__fuzz_307 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = ReplacingMergeTree
+    ORDER BY (k1, k2) SETTINGS allow_nullable_key = 1;
+INSERT INTO t__fuzz_307 SELECT * FROM generateRandom() LIMIT 10;
+SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin([tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row,
+    arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k
+FROM t__fuzz_307 FINAL ORDER BY (toNullable('655.36'), 2, toNullable('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null;
+
+CREATE TABLE t__fuzz_282 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = ReplacingMergeTree ORDER BY (k1, k2) SETTINGS allow_nullable_key = 1;
+INSERT INTO t__fuzz_282 VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5);
+
+SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin([tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_282 FINAL ORDER BY (toNullable('655.36'), 2, toNullable('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null;
diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference
new file mode 100644
index 00000000000..573541ac970
--- /dev/null
+++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.reference
@@ -0,0 +1 @@
+0
diff --git a/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql
new file mode 100644
index 00000000000..3b30a2b6c2c
--- /dev/null
+++ b/tests/queries/0_stateless/02915_move_partition_inactive_replica.sql
@@ -0,0 +1,58 @@
+-- Tags: no-parallel
+
+create database if not exists shard_0;
+create database if not exists shard_1;
+
+drop table if exists shard_0.from_0;
+drop table if exists shard_1.from_0;
+drop table if exists shard_0.from_1;
+drop table if exists shard_1.from_1;
+drop table if exists shard_0.to;
+drop table if exists shard_1.to;
+
+create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+
+create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+create table shard_1.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+
+insert into shard_0.from_0 select number from numbers(10);
+insert into shard_0.from_0 select number + 10 from numbers(10);
+
+insert into shard_0.from_1 select number + 20 from numbers(10);
+insert into shard_0.from_1 select number + 30 from numbers(10);
+
+system sync replica shard_1.from_0;
+system sync replica shard_1.from_1;
+
+
+create table shard_0.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+
+create table shard_1.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1;
+
+detach table shard_1.to;
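+-- with shard_1.to detached, the ON CLUSTER moves below run while one replica is inactive; never_throw and the 1s DDL timeout let them return without waiting for it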
+
+alter table shard_0.from_0 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1;
+
+alter table shard_0.from_1 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1;
+
+OPTIMIZE TABLE shard_0.from_0;
+OPTIMIZE TABLE shard_1.from_0;
+OPTIMIZE TABLE shard_0.from_1;
+OPTIMIZE TABLE shard_1.from_1;
+OPTIMIZE TABLE shard_0.to;
+
+system restart replica shard_0.to;
+
+select sleep(2);
+
+attach table shard_1.to;
+
+drop table if exists shard_0.from_0;
+drop table if exists shard_1.from_0;
+drop table if exists shard_0.from_1;
+drop table if exists shard_1.from_1;
+drop table if exists shard_0.to;
+drop table if exists shard_1.to;
+
diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.reference b/tests/queries/0_stateless/02916_glogal_in_cancel.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02916_glogal_in_cancel.sql b/tests/queries/0_stateless/02916_glogal_in_cancel.sql
new file mode 100644
index 00000000000..ad54f1ecdec
--- /dev/null
+++ b/tests/queries/0_stateless/02916_glogal_in_cancel.sql
@@ -0,0 +1,3 @@
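+-- Building the GLOBAL IN set over numbers(10000000000.) must be cancelled by max_execution_time with timeout_overflow_mode='break' instead of running to completion.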
+set max_execution_time = 0.5, timeout_overflow_mode = 'break';
+SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null;
diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.reference b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference
new file mode 100644
index 00000000000..1b5c6f46f77
--- /dev/null
+++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.reference
@@ -0,0 +1,152 @@
+{"QJC4GhRByEtEAjku":{}}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{"Cicktxh":true, "SpByjZKtr2VAyHCO":false}
+{"ClickHouse":"Is Fast", "VO7TCIkyu1akvN":{}}
+{"ClickHouse":"Is Fast"}
+{"ISlW1DB":"Is Fast", "5j4ATkq":{}}
+{"ClickHouse":false}
+{"ClickHouse":"Is Fast", "tRSz":13522460516091116060}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast"}
+{"CzTcYkQdSce":"Is Fast"}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":false}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast", "jql0YAY":[]}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast", "lF2vXus":false}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast"}
+{"ClickHouse":"Is Fast"}
+{"QJiGcwkonghk":"Is Fast"}
+{"sidetx":[{"name":"Alice"}, {"R6Vm":false}, {}], "SpByjZKtr2VAyHCO":false}
+{"students":[{"name":"Alice"}, {"name":"Bob"}]}
+{"students":[{"name":"Alice"}, {"name":true}]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}]}
+{"ISuW1":[{"naYmS":"Alice", "hzTDYZQdScOct0RS":[]}, {"name":"Bob"}]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}], "jql0YAY":[]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}], "lF2vXus":false}
+{"students":[{"QJmGe":"Alice"}, {"name":"Bob"}]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}]}
+{"kXtdet":[{"name":"Alice"}, {"name":"Bob"}]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}], "Qcm4":{}}
+{"students":[{"name":"Alice"}, {"PmjG":"Bob"}]}
+{"students":[{"name":6128974479331836233}, {"name":"Bob"}]}
+{"sGudyet5u":[{"name":"Alice"}, {"name":"Bob"}, {}]}
+{"students":[{"name":"Alice"}, {"name":"Bob"}]}
+{"students":[{"Kamc":true}, {"name":"rKKN+5#NKEi-uf5U"}]}
+{"students":[{"name":"Alice"}, {"nPL6":1455900058404521160}]}
+{"students":[{"name":"Alice", "dzm5g9aPI21iIP9":[]}, {"name":"Bob"}]}
+{"students":[{"n4z4N":true, "uJrCh4ifo":{}}, {"name":"Bob", "kMnsl0BBFk":[]}], "kG21YiAcUKpcUS2":true}
+{"students":[{"name":"Alice"}, {"name":"Bob", "wQCN":{}}]}
+{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]}
+{"schedule":[{"bdvelrflX":"7am", "5ZB35":{"nHypO":[]}}, {"23slh":"12pm"}]}
+{"tkdu8hl":[{"bdvelrflX":"7am", "5ZB35":{"nHypO":[]}}, {"23slh":"12pm"}]}
+{"tkdu8hl":[{"bdvelrflX":"7am", "5mkj5":{"nHypO":[]}}, {"23slh":"12pm"}], "n8HX5N6DVpBa":["fYOPSVVK*Brv_-AajZwT"]}
+{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"npOE":[[]]}}, {"23slh":"12pm"}], "nHXa6BVq8E":["fYOPSVVK*Brv_-AajZwT"], "BHUNvB8sHk8ts6":true}
+{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"G71D":[[], []]}}, {"23slh":"12pm"}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "BHUNvB8sHk8ts6":true, "qnk47QAn0yQ3ESEgO":true}
+{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"G71D":[[], []]}}, {"23slh":"-plal2e"}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "BHUNvB8sHk8ts6":true, "qnk47QAn0yQ3ESEgO":true}
+{"tkdu8hl":[{"nQ4PePPfX":16091119822740071899, "5mkj5":{"Gpq7":[[], [false]]}, "YgbEtY":true}, {"23slh":false}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "ByRvBC4H0kgydJ":false, "zqokAQz8z0KnPOBrs8":true}
+{"kzcUZOl":[{"nQ4PePPfX":16091119822740071899, "Ekmj":{"lBKR":[[], [false], []], "dLc32r2f":{}}, "xbguW":"vGV&bitEteAH%-Eigg_7VlejYuHP"}, {"23slh":false}, {}], "FOIRaJ6VqVCKD0E":["fYOPSVVK*Brv_-AajZwT", 17244534201851710710], "ByRvBC4H0kgydJ":false, "zqokAQz8z0KnPOBrs8":true}
+{"kzcUZOl":[{"nQ4PePPfX":16091119822740071899, "Ekmj":{"lBKR":[[3774015142547830176], [false], []], "rCmVPvvf":{"wU6YWjag":[]}}, "xb7uW":"pWUTs&ikTCNRQt"}, {"23slh":false}, {}], "h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zqokAQz8z0KnPOBrs8":true}
+{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[14228925827882160318, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":true}, {}, {"qOElRhbehMXQNrln":{"PDoZa8OJHh1al59Ggq":{}}}], "h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zqokAQz8z0KnPOBrs8":true}
+{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[14228925827882160318, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":18233789955605096603}, {}, {"qOElRhbehMXQNrln":{"PoZngOHXMaWGRJq":{"QlnPi9zKoBtW2nGWB":"LgFazuGX*CuDy7X%4hkEmykg@6"}}}], "h3IK06PQGfCRQ":[false, false], "SyRRLBzEjy8YJ":false, "zQO8BA7nazqKW7CRP8":true}
+{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[16730631663303458403, "TpCrsW@11Io1sSu1@nFm"], [true], []], "rOmNvc":{"wU6YWjag":[false], "pIK6tGXUp1gekWViJ":{}}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jBT1ImcYb77bl2":true}, {"dsyf":18233789955605096603, "mmCFLovnBThJPtpQG0Tv":false}, {}, {"qOElRhbehMXQNrln":{"PoZngOHXMaWGRJq":{"QlnPi9zKoBtW2nGWB":"LgFazuGX*CuDy7X%4hkEmykg@6"}}}, {"sx21nRmS69bXRo":[]}], "h3IK06PQGfCRQ":[false, "HjPw@G1Icu#dn"], "SyRRLBzEjy8YJ":false, "zQO8BA7nazqKW7CRP8":true}
+{"ukrzZl":[{"nQ4PePPfX":16091119822740071899, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D"], [true], [], []], "rOmNvc":{"wOWxSWQf":[false], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988}, "igqgnb":[]}, "xb7uW":"pWUTs&ikTCNRQt", "jlT1T35c27wbl2":true}, {"dsyf":18233789955605096603, "mYikENkiDhPRtQHOr":true}, {}, {"qOElRhbehMXQNrln":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "JWOUP4WB1":14622543266409160782}}}, {"sx21nRmS69bXRo":[]}], "h3IK06PQGfCRQ":[false, "HjPw@G1Icu#dn"], "S1ncA0ERs8Y9v":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true}
+{"ukrzZl":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", false], [true, true], [], []], "rOmNvc":{"wOWxSWQf":[false], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988}, "igqgnb":[], "pUDeAJw":"MN^9hUPKv811Vq!"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":18233789955605096603, "mYikENkiDhPRtQHOr":true}, {}, {"qOElRhbehMXQNrln":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "aOUaQBB":false}}}, {"x27uem04bX6R87b":[[]]}, {"MqSQ5v":[]}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc"], "S1ncA0ERs8Y9v":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true}
+{"UkPbWZl":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lBKR":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", false], [true, true], [false], []], "rvCMyf":{"2pnWUuQ6J":[false, "q-5Gl5B8uOK"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":516601863564431352}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":"F!*nU1V_WOni8$a9RXBHGob^sg", "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"4GBqJBrnoOHJW5GA":{"QaPSqINbjb7nGx9qz":8975023301134451623, "OUlR":false}}}, {"x27uem04bX6R87b":[[]]}, {"MqSQ5v":[]}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc", true], "dlCX4s8LF":"@7EShAFjSycp%Wo0gHn", "zQO8BA7nazqKW7CRP8":true, "XahaweEPjnHUyKsT":{}}
+{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "5kmG":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", 17822336972471685000], [true, true], [false], [], []], "rvCMyf":{"2pnWUuQ6J":[false, "q-5Gl5B8uOK"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"7Qf27pQMkchIOBWX":{"QaPSqINbjb7nGx9qz":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[]], []]}, {"MqSQ5v":[9304041946960766827]}, {}], "h3IK06PQGfCRQ":[false, "7pq+IfdiKeTkTym7AWjlc", true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}}
+{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "mGJx":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", 17822336972471685000], [true, true], [10370853850869029207], [], ["VaTduwAFH0ahN5xeJU"]], "rvCMyf":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "pIK6tGXUp1gekWViJ":{}, "pFKIzg3HC":14538916875375166988, "yeNIt3JgSC0K":1931793149388080066}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"qOURhbeBpKE8qrhC":{"7Qf27pQMkchIOBWX":{"aKaShNyxj7Gx9qB":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[]], []]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "h3IK06PQGfCRQ":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}}
+{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "xGBZx":{"lB3l":[[16730631663303458403, "eiUmT%F$FQBWtWz^Tt7Ix&D", "sFwAP3"], [true, "-TBj_T1BS7OJh8^p1qO3!DK_X&CfwetZ"], [5795439407585677270, false], [], ["VaTduwAFH0ahN5xeJU"]], "OvMy":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "wni3QGXfpgeq":{"QF0hiIqRIKp2mp04U":14287172497490584292}, "M8pg0INzhg3Hz":14538916875375166988, "yeNIt3JgSC0K":false, "TeFWw":[]}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"fDT@hLdFJNXwBfJ__Fok7u2@BWY^t0"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true}, {}, {"DjYSOeUFNepEK4XvC":{"7Qf27pQMkchIOBWX":{"aKaShNyxj7Gx9qB":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}}}, {"x27uem04bX6R87b":[[[15632688604980432085]], []]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "h3IK06PQGfCRQ":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}}
+{"IkkCdvbW8oLK":[{"nQ4PePPfX":11197252787701758701, "xGBZx":{"lB3l":[["_#JSXSLdVKXb+c", "eiUmT%F$FQBWtWz^Tt7Ix&D", "sFwAP3"], [true, "-TBj_T1BS7OJh8^p1qO3!DK_X&CfwetZ"], [5795439407585677270, false], [], ["VaTduwAFH0ahN5xeJU"]], "OvMy":{"2pnWUuQ6J":[false, "6J%Orinf%4"], "wni3QGXfpgeq":{"QF0hiIqRIKp2mp04U":14287172497490584292}, "M8pg0INzhg3Hz":14538916875375166988, "yeNIt3JgSC0K":false, "TeFWw":[]}, "BVH5PAgEe4b":[], "pUDeAJw":"LnJMn0D&2lr^k!A", "uDl68z":"8&VE7"}, "oiU7x8":false, "jlT1T35c27wbl2":false}, {"dsyf":false, "mYikENkiDhPRtQHOr":true, "lbci":{}}, {}, {"DjYSOeUFNepEK4XvC":{"QVEsjfQBcsIEbRWBW":{"uGYvt33UTmxj7t2B":8975023301134451623, "OUlR":false, "EoEJ7GlbhI0":[]}, "Qya8i":{"EMfurslq2KFOCa29od0d":[]}}}, {"x27uem04bX6R87b":[[[15632688604980432085]], [[]]]}, {"MqSQ5v":[9304041946960766827, "T##LF8eDM"]}, {}], "sEdwKHDRafKvC":[false, 6667769656296380039, true], "dlCX4s8LF":true, "zQO8BA7nazqKW7CRP8":true, "fOa5rfhNLCiqjrnUrtZ6":{}}
+{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]}
+{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]}
+{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]}
+{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]}
+{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}}
+{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}}
+{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]}
+{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}], "6EigJgc8sxf7VIfMkDl":[]}
+{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"izEx":9011753325952200749}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]}
+{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]}
+{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]}
+{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]}
+{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]}
+{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]}
+{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]}
+{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]}
+{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}}
+{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}}
+{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]}
+{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}]}
+{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"izEx":9011753325952200749}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]}
+{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]}
+{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]}
+{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]}
+{"schedule":[{"breakfast":"7am", "5ZB35":{"nHypO":[]}}, {"lunch":"12pm"}]}
+{"schedule":[{"bdvelrflX":"7am"}, {"lunch":"12pm"}]}
+{"23sldMp":[{"Ob8hrGkHsU8X":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"bMnamkjsAsat":"7am"}, {"lunch":"12pm", "OfmJPaS":{}}]}
+{"snjTZul":[{"breakfast":"7am"}, {"lHkn6N":1318333088581732761}, {"bQH4jPs":{}}], "Hrv8ZL6":[]}
+{"schedule":[{"QrqaD":"!uUry9J-#VUCkKD0yyI+xM", "3e8EfNin":"0_Ny&1pcBzd8YEFq8hn4+Q#y^ESEg*"}, {"lunch":"12pm"}], "hGh8RR":{}}
+{"schedule":[{"regEsl2t":true, "q5flU9DI7erByRjh":{}}, {"lH0h":"%yJEbznodCJ8-#KzPNcBHrsr"}, {"pPk2zAcfUxDZcO":{}}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}, {}], "hZNsEeUmexM":{}}
+{"lhhG":[{"breakfast":"7am"}, {"lunch":"12pm", "OEgZYuhDWP3vGbV4bi":[]}, {}]}
+{"schedule":[{"breakfast":"kj*RPaKLng*&h4&UBqa-tw%53aE", "WtHnb8mVPvvHDUYWaJSB":[[]]}, {"lunch":"12pm"}], "6EigJgc8sxf7VIfMkDl":[]}
+{"schedule":[{"breakfast":false}, {"lunch":"12pm", "WikTL":1724418800345361559}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"h3hK0l":[{"breakfast":"7am", "fGNLfAC":{}}, {"lETzn6S":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"izEx":9011753325952200749}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}
+{"schedule":[{"breakfast":"7am"}, {"mY7la":17408441466865856756, "yIG0VqnoY1TTMjs":{"11BIo1csSuB1n":10038860187222625751}}]}
+{"cSJ8eOuN":[{"breakfast":"7am", "UgpWK":{"Wkha9tqdiOefZfAKQcEg":"EbhMQNrlngPo"}}, {"lunch":"12pm", "wGWGRJqJlPYzCB0":[]}, {}]}
+{"UBgFuue":[{"brrak2st":"kEmykg@6-%h-OQ@O_"}, {"lunch":"12pm", "7DnPaGPqi5Wr7":false}, {}]}
+{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm", "LeH3":{}}]}
+{"schedule":[{"breakon":true}, {"Sx1Rch":9823913620251756169, "0TvaWJUmv0Cv":{}}]}
+{}
+{}
+{}
+{"cuNC":"j#Q*KbvL"}
+{}
+{}
+{}
+{"e2mZBQPL9f0pgd0sXR":false}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+{}
+730
+200
diff --git a/tests/queries/0_stateless/02918_fuzzjson_table_function.sql b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql
new file mode 100644
index 00000000000..6db0c69dbac
--- /dev/null
+++ b/tests/queries/0_stateless/02918_fuzzjson_table_function.sql
@@ -0,0 +1,106 @@
+-- Tags: no-parallel, no-replicated-database: Named collection is used
+
+SET allow_experimental_object_type = 1;
+--
+
+DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer;
+CREATE NAMED COLLECTION 02918_json_fuzzer AS json_str='{}';
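+-- Fixed random_seed values keep the fuzzed output deterministic, so it can be compared against the reference.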
+
+SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=54321) LIMIT 10;
+SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"ClickHouse":"Is Fast"}', random_seed=1337) LIMIT 20;
+SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"students":[{"name":"Alice"}, {"name":"Bob"}]}', random_seed=1337) LIMIT 20;
+SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=true) LIMIT 20;
+SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=false) LIMIT 20;
+SELECT * FROM fuzzJSON(02918_json_fuzzer,
+ json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}',
+ random_seed=123456,
+ reuse_output=0,
+ max_output_length=128) LIMIT 20;
+
+SELECT * FROM fuzzJSON(02918_json_fuzzer,
+ json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}',
+ random_seed=123456,
+ reuse_output=0,
+ max_output_length=65536,
+ max_nesting_level=10,
+ max_array_size=20) LIMIT 20;
+
+SELECT * FROM fuzzJSON(02918_json_fuzzer,
+ random_seed=6667,
+ max_nesting_level=0) LIMIT 10;
+
+SELECT * FROM fuzzJSON(02918_json_fuzzer,
+ random_seed=6667,
+ max_object_size=0,
+ max_array_size=0) LIMIT 10;
+
+--
+DROP TABLE IF EXISTS 02918_table_str;
+CREATE TABLE 02918_table_str (json_str String) Engine=Memory;
+
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10;
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10;
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=123, reuse_output=true) limit 10;
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(
+ 02918_json_fuzzer,
+ json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}',
+ random_seed=6666) LIMIT 200;
+
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(
+ 02918_json_fuzzer,
+ json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}',
+ random_seed=6666,
+ min_key_length=1,
+ max_key_length=5) LIMIT 200;
+
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(
+ 02918_json_fuzzer,
+ json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}',
+ max_nesting_level=128,
+ reuse_output=true,
+ random_seed=6666,
+ min_key_length=5,
+ max_key_length=5) LIMIT 200;
+
+INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(
+ 02918_json_fuzzer,
+ json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}',
+ random_seed=6666,
+ reuse_output=1,
+ probability=0.5,
+ max_output_length=65536,
+ max_nesting_level=18446744073709551615,
+ max_array_size=18446744073709551615,
+ max_object_size=18446744073709551615,
+ max_key_length=65536,
+ max_string_value_length=65536) LIMIT 100;
+
+SELECT count() FROM 02918_table_str;
+
+DROP TABLE IF EXISTS 02918_table_str;
+
+--
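+-- Invalid arguments must be rejected with BAD_ARGUMENTS.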
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length="Hello") LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=10) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=-0.1) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=1.1) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_string_value_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=0) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=11) LIMIT 10; -- { serverError BAD_ARGUMENTS }
+
+--
+DROP TABLE IF EXISTS 02918_table_obj;
+CREATE TABLE 02918_table_obj (json_obj Object('json')) Engine=Memory;
+
+INSERT INTO 02918_table_obj SELECT * FROM fuzzJSON(
+ 02918_json_fuzzer,
+ json_str='{"name": "John Doe", "age": 27, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}',
+ random_seed=12345) LIMIT 200;
+SELECT count() FROM 02918_table_obj;
+
+DROP TABLE IF EXISTS 02918_table_obj;
+
+DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer;
diff --git a/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference
new file mode 100644
index 00000000000..a1ce6a27bb4
--- /dev/null
+++ b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.reference
@@ -0,0 +1,3 @@
+0 0 false
+1 1 true
+0 0 false
diff --git a/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql
new file mode 100644
index 00000000000..f531ec0311d
--- /dev/null
+++ b/tests/queries/0_stateless/02919_segfault_nullable_materialized_update.sql
@@ -0,0 +1,18 @@
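+-- Updating the column that the Nullable MATERIALIZED columns are computed from used to segfault; each SELECT checks the recalculated values after a mutation.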
+DROP TABLE IF EXISTS crash_02919;
+
+CREATE TABLE crash_02919 (
+ b Int64,
+ c Nullable(Int64) MATERIALIZED b,
+ d Nullable(Bool) MATERIALIZED b
+)
+ENGINE = MergeTree
+ORDER BY tuple();
+
+INSERT INTO crash_02919 VALUES (0);
+SELECT b, c, d FROM crash_02919;
+ALTER TABLE crash_02919 UPDATE b = 1 WHERE 1=1 SETTINGS mutations_sync = 1;
+SELECT b, c, d FROM crash_02919;
+ALTER TABLE crash_02919 UPDATE b = 0.1 WHERE 1=1 SETTINGS mutations_sync = 1;
+SELECT b, c, d FROM crash_02919;
+
+DROP TABLE crash_02919;
diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.reference b/tests/queries/0_stateless/02919_storage_fuzzjson.reference
new file mode 100644
index 00000000000..a134ce52c11
--- /dev/null
+++ b/tests/queries/0_stateless/02919_storage_fuzzjson.reference
@@ -0,0 +1,3 @@
+100
+100
+100
diff --git a/tests/queries/0_stateless/02919_storage_fuzzjson.sql b/tests/queries/0_stateless/02919_storage_fuzzjson.sql
new file mode 100644
index 00000000000..80b4a406a08
--- /dev/null
+++ b/tests/queries/0_stateless/02919_storage_fuzzjson.sql
@@ -0,0 +1,44 @@
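+-- The generated JSON is random, so only row counts are checked.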
+DROP TABLE IF EXISTS 02919_test_table_noarg;
+CREATE TABLE 02919_test_table_noarg(str String) ENGINE = FuzzJSON('{}');
+
+SELECT count() FROM (SELECT * FROM 02919_test_table_noarg LIMIT 100);
+
+DROP TABLE IF EXISTS 02919_test_table_noarg;
+
+--
+DROP TABLE IF EXISTS 02919_test_table_valid_args;
+CREATE TABLE 02919_test_table_valid_args(str String) ENGINE = FuzzJSON(
+ '{"pet":"rat"}', NULL);
+
+SELECT count() FROM (SELECT * FROM 02919_test_table_valid_args LIMIT 100);
+
+DROP TABLE IF EXISTS 02919_test_table_valid_args;
+
+--
+DROP TABLE IF EXISTS 02919_test_table_reuse_args;
+CREATE TABLE 02919_test_table_reuse_args(str String) ENGINE = FuzzJSON(
+ '{
+ "name": "Jane Doe",
+ "age": 30,
+ "city": "New York",
+ "contacts": {
+ "email": "jane@example.com",
+ "phone": "+1234567890"
+ },
+ "skills": [
+ "JavaScript",
+ "Python",
+ {
+ "frameworks": ["React", "Django"]
+ }
+ ],
+ "projects": [
+ {"name": "Project A", "status": "completed"},
+ {"name": "Project B", "status": "in-progress"}
+ ]
+ }',
+ 12345);
+
+SELECT count() FROM (SELECT * FROM 02919_test_table_reuse_args LIMIT 100);
+
+DROP TABLE IF EXISTS 02919_test_table_reuse_args;
diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference
new file mode 100644
index 00000000000..2f319dfb812
--- /dev/null
+++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.reference
@@ -0,0 +1,12 @@
+2
+3
+4
+2
+3
+4
+2
+3
+4
+2
+3
+4
diff --git a/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh
new file mode 100755
index 00000000000..5dd58ec0d7f
--- /dev/null
+++ b/tests/queries/0_stateless/02921_file_engine_size_virtual_column.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+echo "1" > $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv
+echo "12" > $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv
+echo "123" > $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv
+
+$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size"
+# Run this query twice to check correct behaviour when cache is used
+$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size"
+
+# Test the same files in an archive
+tar -cf $CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar $CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv $CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv
+
+$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size"
+$CLICKHOUSE_LOCAL -q "select _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.archive.tar :: $CLICKHOUSE_TEST_UNIQUE_NAME.data{1,2,3}.tsv') order by _size"
+
+rm $CLICKHOUSE_TEST_UNIQUE_NAME.*
+
diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference
new file mode 100644
index 00000000000..369837adcbb
--- /dev/null
+++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.reference
@@ -0,0 +1,12 @@
+a.tsv 24
+b.tsv 33
+c.tsv 33
+a.tsv 24
+b.tsv 33
+c.tsv 33
+a.tsv 24
+b.tsv 33
+c.tsv 33
+a.tsv 24
+b.tsv 33
+c.tsv 33
diff --git a/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh
new file mode 100755
index 00000000000..51de2117dca
--- /dev/null
+++ b/tests/queries/0_stateless/02922_url_s3_engine_size_virtual_column.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
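+# Run each query twice to check correct behaviour when the cache is used.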
+$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file"
+$CLICKHOUSE_CLIENT -q "select _file, _size from url('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file"
+
+$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file"
+$CLICKHOUSE_CLIENT -q "select _file, _size from s3('http://localhost:11111/test/{a,b,c}.tsv', 'One') order by _file"
+
diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.reference b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference
new file mode 100644
index 00000000000..573541ac970
--- /dev/null
+++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.reference
@@ -0,0 +1 @@
+0
diff --git a/tests/queries/0_stateless/02923_cte_equality_disjunction.sql b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql
new file mode 100644
index 00000000000..288bed9e491
--- /dev/null
+++ b/tests/queries/0_stateless/02923_cte_equality_disjunction.sql
@@ -0,0 +1,12 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/5323
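+-- A disjunction of equality/match conditions used via an alias in WHERE used to break logical-expression optimization; the query only has to return 0 without an error.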
+CREATE TABLE test_bug_optimization
+(
+ `path` String
+)
+ENGINE = MergeTree
+ORDER BY path;
+
+WITH (path = 'test1') OR match(path, 'test2') OR (match(path, 'test3') AND match(path, 'test2')) OR match(path, 'test4') OR (path = 'test5') OR (path = 'test6') AS alias_in_error
+SELECT count(1)
+FROM test_bug_optimization
+WHERE alias_in_error;
diff --git a/tests/queries/0_stateless/02923_explain_expired_context.reference b/tests/queries/0_stateless/02923_explain_expired_context.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02923_explain_expired_context.sql b/tests/queries/0_stateless/02923_explain_expired_context.sql
new file mode 100644
index 00000000000..68277508eb2
--- /dev/null
+++ b/tests/queries/0_stateless/02923_explain_expired_context.sql
@@ -0,0 +1,3 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/51321
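+-- EXPLAIN ESTIMATE used to fail on these queries because of an expired query context; both statements must now succeed (the reference is empty).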
+EXPLAIN ESTIMATE SELECT any(toTypeName(s)) FROM (SELECT 'bbbbbbbb', toTypeName(s), CAST('', 'LowCardinality(String)'), NULL, CAST('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 'String') AS s) AS t1 FULL OUTER JOIN (SELECT CAST('bbbbb\0\0bbb\0bb\0bb', 'LowCardinality(String)'), CAST(CAST('a', 'String'), 'LowCardinality(String)') AS s GROUP BY CoNnEcTiOn_Id()) AS t2 USING (s) WITH TOTALS;
+EXPLAIN ESTIMATE SELECT any(s) FROM (SELECT '' AS s) AS t1 JOIN (SELECT '' AS s GROUP BY connection_id()) AS t2 USING (s);
diff --git a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference
new file mode 100644
index 00000000000..bc42121fb39
--- /dev/null
+++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.reference
@@ -0,0 +1,6 @@
+2
+3
+4
+2
+3
+4
diff --git a/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh
new file mode 100755
index 00000000000..dc01687772f
--- /dev/null
+++ b/tests/queries/0_stateless/02923_hdfs_engine_size_virtual_column.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest, use-hdfs
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data1.tsv') select 1 settings hdfs_truncate_on_insert=1;"
+$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data2.tsv') select 11 settings hdfs_truncate_on_insert=1;"
+$CLICKHOUSE_CLIENT -q "insert into table function hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data3.tsv') select 111 settings hdfs_truncate_on_insert=1;"
+
+
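+# Run each query twice to check correct behaviour when the cache is used.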
+$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size"
+$CLICKHOUSE_CLIENT -q "select _size from hdfs('hdfs://localhost:12222/$CLICKHOUSE_TEST_UNIQUE_NAME.data*.tsv', auto, 'x UInt64') order by _size"
+
diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference b/tests/queries/0_stateless/02923_join_use_nulls_modulo.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql
new file mode 100644
index 00000000000..4134a42c599
--- /dev/null
+++ b/tests/queries/0_stateless/02923_join_use_nulls_modulo.sql
@@ -0,0 +1,22 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/47366
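+-- Modulo on LowCardinality keys inside GROUPING SETS combined with join_use_nulls=1 used to fail; the query only has to run without errors (empty reference).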
+SELECT
+ id % 255,
+ toTypeName(d.id)
+FROM
+(
+ SELECT
+ toLowCardinality(1048577) AS id,
+ toLowCardinality(9223372036854775807) AS value
+ GROUP BY
+ GROUPING SETS (
+ (toLowCardinality(1024)),
+ (id % 10.0001),
+ ((id % 2147483646) != -9223372036854775807),
+ ((id % -1) != 255))
+ ) AS a
+ SEMI LEFT JOIN
+(
+ SELECT toLowCardinality(9223372036854775807) AS id
+ WHERE (id % 2147483646) != NULL
+) AS d USING (id)
+SETTINGS join_use_nulls=1;
diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.reference b/tests/queries/0_stateless/02930_client_file_log_comment.reference
new file mode 100644
index 00000000000..09639302c0f
--- /dev/null
+++ b/tests/queries/0_stateless/02930_client_file_log_comment.reference
@@ -0,0 +1,4 @@
+42
+select 42\n /dev/stdin
+4242
+select 4242\n foo
diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh
new file mode 100755
index 00000000000..c425f28ecbe
--- /dev/null
+++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# reset --log_comment
+CLICKHOUSE_LOG_COMMENT=
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
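+# With no --log_comment, the queries-file path (/dev/stdin here) is used as the log comment; an explicit --log_comment overrides it.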
+$CLICKHOUSE_CLIENT --queries-file /dev/stdin <<<'select 42'
+$CLICKHOUSE_CLIENT -nm -q "
+ system flush logs;
+ select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42\n' and type != 'QueryStart';
+"
+
+$CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 4242'
+$CLICKHOUSE_CLIENT -nm -q "
+ system flush logs;
+ select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242\n' and type != 'QueryStart';
+"
diff --git a/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto b/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto
index e5283907936..7f72d599707 100644
--- a/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto
+++ b/tests/queries/0_stateless/format_schemas/02266_protobuf_format_google_wrappers.proto
@@ -1,6 +1,6 @@
syntax = "proto3";
-import "wrappers.proto";
+import "google/protobuf/wrappers.proto";
message Message {
google.protobuf.StringValue str = 1;
diff --git a/tests/queries/0_stateless/format_schemas/wrappers.proto b/tests/queries/0_stateless/format_schemas/wrappers.proto
deleted file mode 100644
index c571f096879..00000000000
--- a/tests/queries/0_stateless/format_schemas/wrappers.proto
+++ /dev/null
@@ -1,123 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2008 Google Inc. All rights reserved.
-// https://developers.google.com/protocol-buffers/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
-//
-// These wrappers have no meaningful use within repeated fields as they lack
-// the ability to detect presence on individual elements.
-// These wrappers have no meaningful use within a map or a oneof since
-// individual entries of a map or fields of a oneof can already detect presence.
-
-syntax = "proto3";
-
-package google.protobuf;
-
-option csharp_namespace = "Google.Protobuf.WellKnownTypes";
-option cc_enable_arenas = true;
-option go_package = "google.golang.org/protobuf/types/known/wrapperspb";
-option java_package = "com.google.protobuf";
-option java_outer_classname = "WrappersProto";
-option java_multiple_files = true;
-option objc_class_prefix = "GPB";
-
-// Wrapper message for `double`.
-//
-// The JSON representation for `DoubleValue` is JSON number.
-message DoubleValue {
- // The double value.
- double value = 1;
-}
-
-// Wrapper message for `float`.
-//
-// The JSON representation for `FloatValue` is JSON number.
-message FloatValue {
- // The float value.
- float value = 1;
-}
-
-// Wrapper message for `int64`.
-//
-// The JSON representation for `Int64Value` is JSON string.
-message Int64Value {
- // The int64 value.
- int64 value = 1;
-}
-
-// Wrapper message for `uint64`.
-//
-// The JSON representation for `UInt64Value` is JSON string.
-message UInt64Value {
- // The uint64 value.
- uint64 value = 1;
-}
-
-// Wrapper message for `int32`.
-//
-// The JSON representation for `Int32Value` is JSON number.
-message Int32Value {
- // The int32 value.
- int32 value = 1;
-}
-
-// Wrapper message for `uint32`.
-//
-// The JSON representation for `UInt32Value` is JSON number.
-message UInt32Value {
- // The uint32 value.
- uint32 value = 1;
-}
-
-// Wrapper message for `bool`.
-//
-// The JSON representation for `BoolValue` is JSON `true` and `false`.
-message BoolValue {
- // The bool value.
- bool value = 1;
-}
-
-// Wrapper message for `string`.
-//
-// The JSON representation for `StringValue` is JSON string.
-message StringValue {
- // The string value.
- string value = 1;
-}
-
-// Wrapper message for `bytes`.
-//
-// The JSON representation for `BytesValue` is JSON string.
-message BytesValue {
- // The bytes value.
- bytes value = 1;
-}
\ No newline at end of file
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 077e323ad4e..3a74001e0d3 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -1,4 +1,4 @@
-personal_ws-1.1 en 2633
+personal_ws-1.1 en 2646
AArch
ACLs
ALTERs
@@ -261,6 +261,7 @@ FOSDEM
FQDN
Failover
FarmHash
+FileLog
FilesystemCacheBytes
FilesystemCacheElements
FilesystemCacheFiles
@@ -278,7 +279,6 @@ FilesystemMainPathTotalBytes
FilesystemMainPathTotalINodes
FilesystemMainPathUsedBytes
FilesystemMainPathUsedINodes
-FileLog
FixedString
Flink
ForEach
@@ -441,6 +441,7 @@ Kolmogorov
Kubernetes
LDAP
LGPL
+LIMITs
LLDB
LLVM's
LOCALTIME
@@ -571,13 +572,13 @@ NetworkSendPackets
NodeJs
NuRaft
NumHexagons
+NumPy
NumToString
NumToStringClassC
NumberOfDatabases
NumberOfDetachedByUserParts
NumberOfDetachedParts
NumberOfTables
-NumPy
OFNS
OLAP
OLTP
@@ -588,10 +589,10 @@ OSGuestNiceTimeNormalized
OSGuestTime
OSGuestTimeCPU
OSGuestTimeNormalized
+OSIOWaitMicroseconds
OSIOWaitTime
OSIOWaitTimeCPU
OSIOWaitTimeNormalized
-OSIOWaitMicroseconds
OSIdleTime
OSIdleTimeCPU
OSIdleTimeNormalized
@@ -1470,12 +1471,12 @@ fastops
fcoverage
fibonacci
fifo
+filelog
filesystem
filesystemAvailable
filesystemCapacity
filesystemFree
filesystems
-filelog
finalizeAggregation
fips
firstLine
@@ -1501,6 +1502,7 @@ formatRowNoNewline
formated
formatschema
formatter
+formatters
freezed
fromDaysSinceYearZero
fromModifiedJulianDay
@@ -1511,6 +1513,7 @@ fromUnixTimestampInJodaSyntax
fsync
func
fuzzBits
+fuzzJSON
fuzzer
fuzzers
gRPC
@@ -1532,6 +1535,7 @@ geohashEncode
geohashesInBox
geoip
geospatial
+getClientHTTPHeader
getMacro
getOSKernelVersion
getServerPort
@@ -1554,6 +1558,7 @@ graphql
greatCircleAngle
greatCircleDistance
greaterOrEquals
+greaterorequals
greenspace
groupArray
groupArrayInsertAt
@@ -1739,6 +1744,7 @@ lemmatize
lemmatized
lengthUTF
lessOrEquals
+lessorequals
levenshtein
levenshteinDistance
lexicographically
@@ -1917,6 +1923,7 @@ notEquals
notILike
notIn
notLike
+notequals
notretry
nowInBlock
ntile
@@ -2348,6 +2355,7 @@ subtractSeconds
subtractWeeks
subtractYears
subtree
+subtrees
subtype
sudo
sumCount
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index 014ee5e9a17..ebe138d597a 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -1,12 +1,15 @@
+v23.10.5.20-stable 2023-11-25
v23.10.4.25-stable 2023-11-17
v23.10.3.5-stable 2023-11-10
v23.10.2.13-stable 2023-11-08
v23.10.1.1976-stable 2023-11-02
+v23.9.6.20-stable 2023-11-25
v23.9.5.29-stable 2023-11-17
v23.9.4.11-stable 2023-11-08
v23.9.3.12-stable 2023-10-31
v23.9.2.56-stable 2023-10-19
v23.9.1.1854-stable 2023-09-29
+v23.8.8.20-lts 2023-11-25
v23.8.7.24-lts 2023-11-17
v23.8.6.16-lts 2023-11-08
v23.8.5.16-lts 2023-10-31
@@ -34,6 +37,7 @@ v23.4.4.16-stable 2023-06-17
v23.4.3.48-stable 2023-06-12
v23.4.2.11-stable 2023-05-02
v23.4.1.1943-stable 2023-04-27
+v23.3.18.15-lts 2023-11-25
v23.3.17.13-lts 2023-11-17
v23.3.16.7-lts 2023-11-08
v23.3.15.29-lts 2023-10-31
diff --git a/utils/security-generator/generate_security.py b/utils/security-generator/generate_security.py
index 83180ccce1c..ccf9a82067e 100755
--- a/utils/security-generator/generate_security.py
+++ b/utils/security-generator/generate_security.py
@@ -52,10 +52,7 @@ def generate_supported_versions() -> str:
with open(VERSIONS_FILE, "r", encoding="utf-8") as fd:
versions = [line.split(maxsplit=1)[0][1:] for line in fd.readlines()]
- # The versions in VERSIONS_FILE are ordered ascending, so the first one is
- # the greatest one. We may have supported versions in the previous year
- greatest_year = int(versions[0].split(".", maxsplit=1)[0])
- unsupported_year = greatest_year - 2
+ supported_year = 0 # set below, once all supported versions have been collected
# 3 regular versions
regular = [] # type: List[str]
max_regular = 3
@@ -82,14 +79,12 @@ def generate_supported_versions() -> str:
lts.append(version)
to_append = f"| {version} | ✔️ |"
if to_append:
- if len(regular) == max_regular and len(lts) == max_lts:
- # if we reached the max number of supported versions, the rest
- # are unsopported, so year.* will be used
- unsupported_year = min(greatest_year - 1, year)
+ if len(regular) == max_regular or len(lts) == max_lts:
+ supported_year = year
table.append(to_append)
continue
- if year <= unsupported_year:
- # The whole year is unsopported
+ if year < supported_year:
+ # The whole year is unsupported
version = f"{year}.*"
if not version in unsupported:
unsupported.append(version)