diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index fa046ed40c7..2acc1468328 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -151,8 +151,8 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
- SplitBuildSmokeTest:
- needs: [BuilderDebSplitted]
+ SharedBuildSmokeTest:
+ needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
@@ -171,7 +171,7 @@ jobs:
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- - name: Split build check
+ - name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
@@ -598,7 +598,7 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
- BuilderDebSplitted:
+ BuilderDebShared:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
@@ -609,7 +609,7 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
- BUILD_NAME=binary_splitted
+ BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v2
@@ -1012,7 +1012,7 @@ jobs:
# - BuilderBinGCC
- BuilderBinPPC64
- BuilderBinClangTidy
- - BuilderDebSplitted
+ - BuilderDebShared
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
@@ -3153,7 +3153,7 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- - SplitBuildSmokeTest
+ - SharedBuildSmokeTest
runs-on: [self-hosted, style-checker]
steps:
- name: Clear repository
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index cdad9d4f23d..513df8487c4 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -216,8 +216,8 @@ jobs:
# shellcheck disable=SC2046
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr "$TEMP_PATH"
- SplitBuildSmokeTest:
- needs: [BuilderDebSplitted]
+ SharedBuildSmokeTest:
+ needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
@@ -236,7 +236,7 @@ jobs:
uses: actions/download-artifact@v2
with:
path: ${{ env.REPORTS_PATH }}
- - name: Split build check
+ - name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
@@ -620,7 +620,7 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
- BuilderDebSplitted:
+ BuilderDebShared:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
@@ -631,7 +631,7 @@ jobs:
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
- BUILD_NAME=binary_splitted
+ BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v2
@@ -1024,7 +1024,7 @@ jobs:
# - BuilderBinGCC
- BuilderBinPPC64
- BuilderBinClangTidy
- - BuilderDebSplitted
+ - BuilderDebShared
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
@@ -3420,7 +3420,7 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- - SplitBuildSmokeTest
+ - SharedBuildSmokeTest
- CompatibilityCheck
- IntegrationTestsFlakyCheck
runs-on: [self-hosted, style-checker]
diff --git a/README.md b/README.md
index 93d2da1e39d..20340883853 100644
--- a/README.md
+++ b/README.md
@@ -15,4 +15,4 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
## Upcoming events
-* **v22.8 Release Webinar** Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share his vision of what is coming on the roadmap.
+* [**v22.8 Release Webinar**](https://clickhouse.com/company/events/v22-8-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share his vision of what is coming on the roadmap.
diff --git a/contrib/arrow b/contrib/arrow
index 3e03c6de41a..efdcd015cfd 160000
--- a/contrib/arrow
+++ b/contrib/arrow
@@ -1 +1 @@
-Subproject commit 3e03c6de41a86df2fc54a61e9be1abaefeff6b0e
+Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
diff --git a/docker/packager/packager b/docker/packager/packager
index 16a56e9766f..cf0e555d57c 100755
--- a/docker/packager/packager
+++ b/docker/packager/packager
@@ -100,12 +100,12 @@ def run_docker_image_with_env(
subprocess.check_call(cmd, shell=True)
-def is_release_build(build_type, package_type, sanitizer, split_binary):
+def is_release_build(build_type, package_type, sanitizer, shared_libraries):
return (
build_type == ""
and package_type == "deb"
and sanitizer == ""
- and not split_binary
+ and not shared_libraries
)
@@ -116,7 +116,7 @@ def parse_env_variables(
package_type,
cache,
distcc_hosts,
- split_binary,
+ shared_libraries,
clang_tidy,
version,
author,
@@ -202,7 +202,7 @@ def parse_env_variables(
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
- if is_release_build(build_type, package_type, sanitizer, split_binary):
+ if is_release_build(build_type, package_type, sanitizer, shared_libraries):
cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
result.append("WITH_PERFORMANCE=1")
if is_cross_arm:
@@ -215,11 +215,11 @@ def parse_env_variables(
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
- # Create combined output archive for split build and for performance tests.
+ # Create combined output archive for shared library build and for performance tests.
if package_type == "coverity":
result.append("COMBINED_OUTPUT=coverity")
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
- elif split_binary:
+ elif shared_libraries:
result.append("COMBINED_OUTPUT=shared_build")
if sanitizer:
@@ -264,13 +264,13 @@ def parse_env_variables(
result.append("BINARY_OUTPUT=tests")
cmake_flags.append("-DENABLE_TESTS=1")
- if split_binary:
+ if shared_libraries:
cmake_flags.append(
"-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1"
)
# We can't always build utils because it requires too much space, but
- # we have to build them at least in some way in CI. The split build is
- # probably the least heavy disk-wise.
+ # we have to build them at least in some way in CI. The shared library
+ # build is probably the least heavy disk-wise.
cmake_flags.append("-DENABLE_UTILS=1")
# utils are not included into clickhouse-bundle, so build everything
build_target = "all"
@@ -351,7 +351,7 @@ if __name__ == "__main__":
default="",
)
- parser.add_argument("--split-binary", action="store_true")
+ parser.add_argument("--shared-libraries", action="store_true")
parser.add_argument("--clang-tidy", action="store_true")
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
parser.add_argument(
@@ -404,7 +404,7 @@ if __name__ == "__main__":
args.package_type,
args.cache,
args.distcc_hosts,
- args.split_binary,
+ args.shared_libraries,
args.clang_tidy,
args.version,
args.author,
diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md
index 4415ca8bacb..7f8bf801a27 100644
--- a/docs/en/development/developer-instruction.md
+++ b/docs/en/development/developer-instruction.md
@@ -267,6 +267,12 @@ The system will prepare ClickHouse binary builds for your pull request individua
Most likely some of the builds will fail on the first try. This is because we check builds with both gcc and clang, and nearly all existing warnings (always with the `-Werror` flag) are enabled for clang. On that same page, you can find all of the build logs so that you do not have to build ClickHouse in all of the possible ways.
+## Browse ClickHouse Source Code {#browse-clickhouse-source-code}
+
+You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation, semantic highlighting, search and indexing. The code snapshot is updated daily.
+
+Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
+
## Faster builds for development: Split build configuration {#split-build}
ClickHouse is normally statically linked into a single static `clickhouse` binary with minimal dependencies. This is convenient for distribution, but it means that for every change the entire binary needs to be re-linked, which is slow and inconvenient for development. As an alternative, you can instead build dynamically linked shared libraries, allowing for faster incremental builds. To use it, add the following flags to your `cmake` invocation:
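
The flags in question are `-DUSE_STATIC_LIBRARIES=0` and `-DSPLIT_SHARED_LIBRARIES=1`, the same pair the packager passes for `--shared-libraries` builds (see `docker/packager/packager` below).
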
diff --git a/docs/en/getting-started/example-datasets/uk-price-paid.md b/docs/en/getting-started/example-datasets/uk-price-paid.md
index b7a486fb057..04e853275ed 100644
--- a/docs/en/getting-started/example-datasets/uk-price-paid.md
+++ b/docs/en/getting-started/example-datasets/uk-price-paid.md
@@ -389,12 +389,6 @@ SETTINGS mutations_sync = 1;
Let's run the same 3 queries.
-[Enable](../../operations/settings/settings.md#allow-experimental-projection-optimization) projections for selects:
-
-```sql
-SET allow_experimental_projection_optimization = 1;
-```
-
### Query 1. Average Price Per Year {#average-price-projections}
Query:
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index 78f55c0398a..936a20c5e9c 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -438,6 +438,18 @@ For more information, see the section “[Configuration files](../../operations/
<include_from>/etc/metrica.xml</include_from>
```
+## interserver_listen_host {#interserver-listen-host}
+
+Restriction on hosts that can exchange data between ClickHouse servers.
+The default value equals the `listen_host` setting.
+
+Examples:
+
+``` xml
+<interserver_listen_host>::ffff:a00:1</interserver_listen_host>
+<interserver_listen_host>10.0.0.1</interserver_listen_host>
+```
+
## interserver_http_port {#interserver-http-port}
Port for exchanging data between ClickHouse servers.
@@ -970,7 +982,7 @@ Default value: 2.
**Example**
```xml
-<background_pool_size>3</background_pool_size>
+<background_merges_mutations_concurrency_ratio>3</background_merges_mutations_concurrency_ratio>
```
## background_move_pool_size {#background_move_pool_size}
diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md
index f2fac4ccdf8..8e1320c4dcd 100644
--- a/docs/ru/development/developer-instruction.md
+++ b/docs/ru/development/developer-instruction.md
@@ -285,3 +285,9 @@ Pull request можно создать, даже если работа над з
Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
Вероятнее всего, часть сборок не будет успешной с первого раза. Ведь мы проверяем сборку кода и gcc и clang, а при сборке с помощью clang включаются почти все существующие в природе warnings (всегда с флагом `-Werror`). На той же странице, вы сможете найти логи сборки - вам не обязательно самому собирать ClickHouse всеми возможными способами.
+
+## Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse}
+
+Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно.
+
+Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse).
diff --git a/docs/ru/getting-started/example-datasets/uk-price-paid.md b/docs/ru/getting-started/example-datasets/uk-price-paid.md
index c5d13c55b05..ee010da28ea 100644
--- a/docs/ru/getting-started/example-datasets/uk-price-paid.md
+++ b/docs/ru/getting-started/example-datasets/uk-price-paid.md
@@ -389,12 +389,6 @@ SETTINGS mutations_sync = 1;
Давайте выполним те же 3 запроса.
-[Включите](../../operations/settings/settings.md#allow-experimental-projection-optimization) поддержку проекций:
-
-```sql
-SET allow_experimental_projection_optimization = 1;
-```
-
### Запрос 1. Средняя цена за год {#average-price-projections}
Запрос:
@@ -647,4 +641,3 @@ no projection: 100 rows in set. Elapsed: 0.069 sec. Processed 26.32 million rows
### Online Playground {#playground}
Этот набор данных доступен в [Online Playground](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==).
-
diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md
index 0c0c7da330d..222c6bccfaf 100644
--- a/docs/ru/operations/server-configuration-parameters/settings.md
+++ b/docs/ru/operations/server-configuration-parameters/settings.md
@@ -407,6 +407,18 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
<include_from>/etc/metrica.xml</include_from>
```
+## interserver_listen_host {#interserver-listen-host}
+
+Ограничение по хостам, для обмена между серверами ClickHouse.
+Значение по умолчанию совпадает со значением параметра listen_host
+
+Примеры:
+
+``` xml
+<interserver_listen_host>::ffff:a00:1</interserver_listen_host>
+<interserver_listen_host>10.0.0.1</interserver_listen_host>
+```
+
## interserver_http_port {#interserver-http-port}
Порт для обмена между серверами ClickHouse.
diff --git a/docs/zh/development/developer-instruction.md b/docs/zh/development/developer-instruction.md
index 7ade3ad57fb..77caad105c6 100644
--- a/docs/zh/development/developer-instruction.md
+++ b/docs/zh/development/developer-instruction.md
@@ -264,3 +264,9 @@ ClickHouse成员一旦在您的拉取请求上贴上«可以测试»标签,就
系统将分别为您的拉取请求准备ClickHouse二进制版本。若要检索这些构建信息,请在检查列表中单击« ClickHouse构建检查»旁边的«详细信息»链接。在这里,您会找到指向ClickHouse的.deb软件包的直接链接,此外,甚至可以将其部署在生产服务器上(如果您不担心)。
某些构建项很可能会在首次构建时失败。这是因为我们同时检查了基于gcc和clang的构建,几乎所有现有的被clang启用的警告(总是带有`-Werror`标志)。在同一页面上,您可以找到所有构建的日志,因此不必以所有可能的方式构建ClickHouse。
+
+## 浏览ClickHouse源代码 {#browse-clickhouse-source-code}
+
+您可以使用 **Woboq** 在线代码浏览器 [点击这里](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). 它提供了代码导航和语义突出显示、搜索和索引。 代码快照每天更新。
+
+此外,您还可以像往常一样浏览源代码 [GitHub](https://github.com/ClickHouse/ClickHouse)
diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp
index 056ed85c670..6891d2113a5 100644
--- a/programs/obfuscator/Obfuscator.cpp
+++ b/programs/obfuscator/Obfuscator.cpp
@@ -46,6 +46,7 @@
#include
#include
#include
+#include <bit>
static const char * documentation = R"(
@@ -186,7 +187,7 @@ static UInt64 transform(UInt64 x, UInt64 seed)
if (x == 2 || x == 3)
return x ^ (seed & 1);
- size_t num_leading_zeros = __builtin_clzll(x);
+ size_t num_leading_zeros = std::countl_zero(x);
return feistelNetwork(x, 64 - num_leading_zeros - 1, seed);
}
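
The builtin-to-`<bit>` migration that recurs throughout this diff is a pure drop-in for non-zero inputs; a minimal sanity check (illustrative, not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t x = 0x00f0'0000'0000'1000ULL;

    // Equivalent to __builtin_clzll / __builtin_ctzll / __builtin_popcountll for
    // non-zero arguments. Unlike the builtins, which are undefined for zero, the
    // std functions are fully defined: std::countl_zero(uint64_t{0}) == 64.
    assert(std::countl_zero(x) == 8);
    assert(std::countr_zero(x) == 12);
    assert(std::popcount(x) == 5);
}
```
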
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 14108d7931d..b86ce4a841c 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -367,7 +367,7 @@ Poco::Net::SocketAddress Server::socketBindListen(
return address;
}
-std::vector<std::string> getListenHosts(const Poco::Util::AbstractConfiguration & config)
+Strings getListenHosts(const Poco::Util::AbstractConfiguration & config)
{
auto listen_hosts = DB::getMultipleValuesFromConfig(config, "", "listen_host");
if (listen_hosts.empty())
@@ -378,6 +378,16 @@ std::vector<std::string> getListenHosts(const Poco::Util::AbstractConfiguration
return listen_hosts;
}
+Strings getInterserverListenHosts(const Poco::Util::AbstractConfiguration & config)
+{
+ auto interserver_listen_hosts = DB::getMultipleValuesFromConfig(config, "", "interserver_listen_host");
+ if (!interserver_listen_hosts.empty())
+ return interserver_listen_hosts;
+
+ /// If it is empty, fall back to the more general listen_host restriction
+ return getListenHosts(config);
+}
+
bool getListenTry(const Poco::Util::AbstractConfiguration & config)
{
bool listen_try = config.getBool("listen_try", false);
@@ -1234,6 +1244,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
/* already_loaded = */ false); /// Reload it right now (initial loading)
const auto listen_hosts = getListenHosts(config());
+ const auto interserver_listen_hosts = getInterserverListenHosts(config());
const auto listen_try = getListenTry(config());
if (config().has("keeper_server"))
@@ -1629,7 +1640,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
std::lock_guard lock(servers_lock);
- createServers(config(), listen_hosts, listen_try, server_pool, async_metrics, servers);
+ createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
if (servers.empty())
throw Exception(
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
@@ -1811,7 +1822,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
void Server::createServers(
Poco::Util::AbstractConfiguration & config,
- const std::vector<std::string> & listen_hosts,
+ const Strings & listen_hosts,
+ const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
@@ -1929,51 +1941,6 @@ void Server::createServers(
#endif
});
- /// Interserver IO HTTP
- port_name = "interserver_http_port";
- createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
- {
- Poco::Net::ServerSocket socket;
- auto address = socketBindListen(config, socket, listen_host, port);
- socket.setReceiveTimeout(settings.http_receive_timeout);
- socket.setSendTimeout(settings.http_send_timeout);
- return ProtocolServerAdapter(
- listen_host,
- port_name,
- "replica communication (interserver): http://" + address.toString(),
- std::make_unique<HTTPServer>(
- context(),
- createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"),
- server_pool,
- socket,
- http_params));
- });
-
- port_name = "interserver_https_port";
- createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
- {
-#if USE_SSL
- Poco::Net::SecureServerSocket socket;
- auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
- socket.setReceiveTimeout(settings.http_receive_timeout);
- socket.setSendTimeout(settings.http_send_timeout);
- return ProtocolServerAdapter(
- listen_host,
- port_name,
- "secure replica communication (interserver): https://" + address.toString(),
- std::make_unique<HTTPServer>(
- context(),
- createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"),
- server_pool,
- socket,
- http_params));
-#else
- UNUSED(port);
- throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
- ErrorCodes::SUPPORT_IS_DISABLED};
-#endif
- });
-
port_name = "mysql_port";
createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
{
@@ -2032,6 +1999,55 @@ void Server::createServers(
});
}
+ /// Now iterate over interserver_listen_hosts
+ for (const auto & interserver_listen_host : interserver_listen_hosts)
+ {
+ /// Interserver IO HTTP
+ const char * port_name = "interserver_http_port";
+ createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
+ {
+ Poco::Net::ServerSocket socket;
+ auto address = socketBindListen(config, socket, interserver_listen_host, port);
+ socket.setReceiveTimeout(settings.http_receive_timeout);
+ socket.setSendTimeout(settings.http_send_timeout);
+ return ProtocolServerAdapter(
+ interserver_listen_host,
+ port_name,
+ "replica communication (interserver): http://" + address.toString(),
+ std::make_unique<HTTPServer>(
+ context(),
+ createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"),
+ server_pool,
+ socket,
+ http_params));
+ });
+
+ port_name = "interserver_https_port";
+ createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
+ {
+#if USE_SSL
+ Poco::Net::SecureServerSocket socket;
+ auto address = socketBindListen(config, socket, interserver_listen_host, port, /* secure = */ true);
+ socket.setReceiveTimeout(settings.http_receive_timeout);
+ socket.setSendTimeout(settings.http_send_timeout);
+ return ProtocolServerAdapter(
+ interserver_listen_host,
+ port_name,
+ "secure replica communication (interserver): https://" + address.toString(),
+ std::make_unique<HTTPServer>(
+ context(),
+ createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"),
+ server_pool,
+ socket,
+ http_params));
+#else
+ UNUSED(port);
+ throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
+ ErrorCodes::SUPPORT_IS_DISABLED};
+#endif
+ });
+ }
+
}
void Server::updateServers(
@@ -2043,6 +2059,7 @@ void Server::updateServers(
Poco::Logger * log = &logger();
const auto listen_hosts = getListenHosts(config);
+ const auto interserver_listen_hosts = getInterserverListenHosts(config);
const auto listen_try = getListenTry(config);
/// Remove servers once all their connections are closed
@@ -2075,7 +2092,7 @@ void Server::updateServers(
}
}
- createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
+ createServers(config, listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
std::erase_if(servers, std::bind_front(check_server, ""));
}
diff --git a/programs/server/Server.h b/programs/server/Server.h
index 4235fcc2d3b..9b664b6213c 100644
--- a/programs/server/Server.h
+++ b/programs/server/Server.h
@@ -86,7 +86,8 @@ private:
void createServers(
Poco::Util::AbstractConfiguration & config,
- const std::vector<std::string> & listen_hosts,
+ const Strings & listen_hosts,
+ const Strings & interserver_listen_hosts,
bool listen_try,
Poco::ThreadPool & server_pool,
AsynchronousMetrics & async_metrics,
diff --git a/programs/server/config.xml b/programs/server/config.xml
index 1060cb3db0a..2121df501a8 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -188,6 +188,10 @@
<listen_host>127.0.0.1</listen_host>
-->
+
+<!-- <interserver_listen_host>::</interserver_listen_host> -->
+<!-- Listen host for communication between replicas. Used for data exchange -->
+<!-- Default values - equal to listen_host -->
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index faef19fe1a3..977d2bca01f 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -583,9 +583,14 @@ try
if (has_vertical_output_suffix)
current_format = "Vertical";
- /// It is not clear how to write progress intermixed with data with parallel formatting.
+ bool logs_into_stdout = server_logs_file == "-";
+ bool extras_into_stdout = need_render_progress || logs_into_stdout;
+ bool select_only_into_file = select_into_file && !select_into_file_and_stdout;
+
+ /// It is not clear how to write progress and logs
+ /// intermixed with data with parallel formatting.
/// It may increase code complexity significantly.
- if (!need_render_progress || (select_into_file && !select_into_file_and_stdout))
+ if (!extras_into_stdout || select_only_into_file)
output_format = global_context->getOutputFormatParallelIfPossible(
current_format, out_file_buf ? *out_file_buf : *out_buf, block);
else
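
Restated as a standalone predicate (a readability sketch using the names from the hunk above, not part of the commit):

```cpp
// Sketch of the condition above. Parallel formatting is safe when nothing
// extra (progress rendering, server logs with server_logs_file == "-") must be
// interleaved into stdout, or when the data goes only to a file, leaving
// stdout free for the extras.
bool canUseParallelFormatting(bool need_render_progress, bool logs_into_stdout,
                              bool select_into_file, bool select_into_file_and_stdout)
{
    const bool extras_into_stdout = need_render_progress || logs_into_stdout;
    const bool select_only_into_file = select_into_file && !select_into_file_and_stdout;
    return !extras_into_stdout || select_only_into_file;
}
```
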
diff --git a/src/Columns/ColumnDecimal.cpp b/src/Columns/ColumnDecimal.cpp
index 4290a7a4cb1..63b76dbb230 100644
--- a/src/Columns/ColumnDecimal.cpp
+++ b/src/Columns/ColumnDecimal.cpp
@@ -298,7 +298,7 @@ ColumnPtr ColumnDecimal<T>::filter(const IColumn::Filter & filt, ssize_t result_
{
while (mask)
{
- size_t index = __builtin_ctzll(mask);
+ size_t index = std::countr_zero(mask);
res_data.push_back(data_pos[index]);
#ifdef __BMI__
mask = _blsr_u64(mask);
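
This mask-scan idiom repeats in the column filters below: `std::countr_zero` locates the next set bit (the next row that passed the filter), and clearing the lowest set bit advances the scan; `_blsr_u64` under `__BMI__` and the portable `mask & (mask - 1)` are the same operation. A self-contained sketch (illustrative, not part of the commit):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t mask = 0b101100; // rows 2, 3 and 5 passed the filter
    while (mask)
    {
        size_t index = std::countr_zero(mask); // position of the lowest set bit
        std::printf("row %zu\n", index);       // prints rows 2, 3, 5 in order
        mask &= mask - 1;                      // clear the lowest set bit (_blsr_u64 without BMI)
    }
}
```
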
diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp
index b71323f2ec7..158d154f3f8 100644
--- a/src/Columns/ColumnFixedString.cpp
+++ b/src/Columns/ColumnFixedString.cpp
@@ -240,7 +240,7 @@ ColumnPtr ColumnFixedString::filter(const IColumn::Filter & filt, ssize_t result
size_t res_chars_size = res->chars.size();
while (mask)
{
- size_t index = __builtin_ctzll(mask);
+ size_t index = std::countr_zero(mask);
res->chars.resize(res_chars_size + n);
memcpySmallAllowReadWriteOverflow15(&res->chars[res_chars_size], data_pos + index * n, n);
res_chars_size += n;
diff --git a/src/Columns/ColumnVector.cpp b/src/Columns/ColumnVector.cpp
index 60423e2b0fe..9084e47c2f5 100644
--- a/src/Columns/ColumnVector.cpp
+++ b/src/Columns/ColumnVector.cpp
@@ -508,7 +508,7 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
{
while (mask)
{
- size_t index = __builtin_ctzll(mask);
+ size_t index = std::countr_zero(mask);
res_data.push_back(data_pos[index]);
#ifdef __BMI__
mask = _blsr_u64(mask);
diff --git a/src/Columns/ColumnsCommon.cpp b/src/Columns/ColumnsCommon.cpp
index cbc62140bef..17f521c242c 100644
--- a/src/Columns/ColumnsCommon.cpp
+++ b/src/Columns/ColumnsCommon.cpp
@@ -2,13 +2,14 @@
#include
#include
#include
+#include <bit>
#include "ColumnsCommon.h"
namespace DB
{
-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
/// Transform 64-byte mask to 64-bit mask.
static UInt64 toBits64(const Int8 * bytes64)
{
@@ -41,11 +42,11 @@ size_t countBytesInFilter(const UInt8 * filt, size_t start, size_t end)
const Int8 * end_pos = pos + (end - start);
-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
const Int8 * end_pos64 = pos + (end - start) / 64 * 64;
for (; pos < end_pos64; pos += 64)
- count += __builtin_popcountll(toBits64(pos));
+ count += std::popcount(toBits64(pos));
/// TODO Add duff device for tail?
#endif
@@ -74,11 +75,11 @@ size_t countBytesInFilterWithNull(const IColumn::Filter & filt, const UInt8 * nu
const Int8 * pos2 = reinterpret_cast<const Int8 *>(null_map) + start;
const Int8 * end_pos = pos + (end - start);
-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
const Int8 * end_pos64 = pos + (end - start) / 64 * 64;
for (; pos < end_pos64; pos += 64, pos2 += 64)
- count += __builtin_popcountll(toBits64(pos) & ~toBits64(pos2));
+ count += std::popcount(toBits64(pos) & ~toBits64(pos2));
/// TODO Add duff device for tail?
#endif
@@ -259,7 +260,7 @@ namespace
{
while (mask)
{
- size_t index = __builtin_ctzll(mask);
+ size_t index = std::countr_zero(mask);
copy_array(offsets_pos + index);
#ifdef __BMI__
mask = _blsr_u64(mask);
diff --git a/src/Columns/ColumnsCommon.h b/src/Columns/ColumnsCommon.h
index 1e5849e2b88..607cff75a98 100644
--- a/src/Columns/ColumnsCommon.h
+++ b/src/Columns/ColumnsCommon.h
@@ -36,7 +36,7 @@ inline UInt64 bytes64MaskToBits64Mask(const UInt8 * bytes64)
_mm256_loadu_si256(reinterpret_cast<const __m256i *>(bytes64)), zero32))) & 0xffffffff)
| (static_cast<UInt64>(_mm256_movemask_epi8(_mm256_cmpeq_epi8(
_mm256_loadu_si256(reinterpret_cast<const __m256i *>(bytes64+32)), zero32))) << 32);
-#elif defined(__SSE2__) && defined(__POPCNT__)
+#elif defined(__SSE2__)
static const __m128i zero16 = _mm_setzero_si128();
UInt64 res =
(static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
diff --git a/src/Common/HashTable/StringHashTable.h b/src/Common/HashTable/StringHashTable.h
index 8d15fde4ce0..c947e746e8d 100644
--- a/src/Common/HashTable/StringHashTable.h
+++ b/src/Common/HashTable/StringHashTable.h
@@ -3,6 +3,7 @@
#include
#include
+#include <bit>
#include
#include
@@ -21,17 +22,17 @@ struct StringKey24
inline StringRef ALWAYS_INLINE toStringRef(const StringKey8 & n)
{
assert(n != 0);
- return {reinterpret_cast<const char *>(&n), 8ul - (__builtin_clzll(n) >> 3)};
+ return {reinterpret_cast<const char *>(&n), 8ul - (std::countl_zero(n) >> 3)};
}
inline StringRef ALWAYS_INLINE toStringRef(const StringKey16 & n)
{
assert(n.items[1] != 0);
- return {reinterpret_cast<const char *>(&n), 16ul - (__builtin_clzll(n.items[1]) >> 3)};
+ return {reinterpret_cast<const char *>(&n), 16ul - (std::countl_zero(n.items[1]) >> 3)};
}
inline StringRef ALWAYS_INLINE toStringRef(const StringKey24 & n)
{
assert(n.c != 0);
- return {reinterpret_cast<const char *>(&n), 24ul - (__builtin_clzll(n.c) >> 3)};
+ return {reinterpret_cast<const char *>(&n), 24ul - (std::countl_zero(n.c) >> 3)};
}
struct StringHashTableHash
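
The arithmetic in `toStringRef` recovers the string length from the zero padding of the little-endian packed key: `std::countl_zero` counts leading zero bits, `>> 3` converts them to whole zero bytes, and subtracting from the key width gives the length. For a `StringKey8` holding "abc" (an illustrative check, not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // "abc" packed StringKey8-style: 'a' in the lowest byte, five zero
    // padding bytes on top.
    uint64_t n = 0x0000000000636261ULL;

    // 41 leading zero bits -> 41 >> 3 == 5 whole zero bytes, so the
    // length is 8 - 5 = 3.
    assert(8ul - (std::countl_zero(n) >> 3) == 3);
}
```
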
diff --git a/src/Common/HyperLogLogCounter.h b/src/Common/HyperLogLogCounter.h
index 4644d18ecf3..cdd4f246e53 100644
--- a/src/Common/HyperLogLogCounter.h
+++ b/src/Common/HyperLogLogCounter.h
@@ -11,6 +11,7 @@
#include
#include
+#include <bit>
#include
#include
@@ -205,7 +206,7 @@ struct TrailingZerosCounter<UInt32>
{
static int apply(UInt32 val)
{
- return __builtin_ctz(val);
+ return std::countr_zero(val);
}
};
@@ -214,7 +215,7 @@ struct TrailingZerosCounter<UInt64>
{
static int apply(UInt64 val)
{
- return __builtin_ctzll(val);
+ return std::countr_zero(val);
}
};
diff --git a/src/Common/IPv6ToBinary.cpp b/src/Common/IPv6ToBinary.cpp
index a8363a46de7..8d335d89353 100644
--- a/src/Common/IPv6ToBinary.cpp
+++ b/src/Common/IPv6ToBinary.cpp
@@ -5,6 +5,7 @@
#include
#include
+#include <bit>
namespace DB
@@ -89,7 +90,7 @@ bool matchIPv6Subnet(const uint8_t * addr, const uint8_t * cidr_addr, UInt8 pref
if (mask)
{
- auto offset = __builtin_ctz(mask);
+ auto offset = std::countr_zero(mask);
if (prefix / 8 != offset)
return prefix / 8 < offset;
diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp
index 580e5f94952..453ed9ec37c 100644
--- a/src/Common/ProfileEvents.cpp
+++ b/src/Common/ProfileEvents.cpp
@@ -25,6 +25,10 @@
M(WriteBufferFromFileDescriptorWrite, "Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \
M(WriteBufferFromFileDescriptorWriteFailed, "Number of times the write (write/pwrite) to a file descriptor have failed.") \
M(WriteBufferFromFileDescriptorWriteBytes, "Number of bytes written to file descriptors. If the file is compressed, this will show compressed data size.") \
+ M(FileSync, "Number of times the F_FULLFSYNC/fsync/fdatasync function was called for files.") \
+ M(DirectorySync, "Number of times the F_FULLFSYNC/fsync/fdatasync function was called for directories.") \
+ M(FileSyncElapsedMicroseconds, "Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for files.") \
+ M(DirectorySyncElapsedMicroseconds, "Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for directories.") \
M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \
M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network).") \
M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \
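
All four new counters can be read at runtime from the standard `system.events` table, e.g. `SELECT event, value, description FROM system.events WHERE event LIKE '%Sync%'`, so fsync frequency and latency become observable without external tooling.
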
diff --git a/src/Common/SpaceSaving.h b/src/Common/SpaceSaving.h
index 0f577349722..20d6812b91b 100644
--- a/src/Common/SpaceSaving.h
+++ b/src/Common/SpaceSaving.h
@@ -78,7 +78,7 @@ private:
constexpr uint64_t nextAlphaSize(uint64_t x)
{
constexpr uint64_t alpha_map_elements_per_counter = 6;
- return 1ULL << (sizeof(uint64_t) * 8 - __builtin_clzll(x * alpha_map_elements_per_counter));
+ return 1ULL << (sizeof(uint64_t) * 8 - std::countl_zero(x * alpha_map_elements_per_counter));
}
public:
diff --git a/src/Common/UTF8Helpers.cpp b/src/Common/UTF8Helpers.cpp
index b1d38c4f31a..0af31726f40 100644
--- a/src/Common/UTF8Helpers.cpp
+++ b/src/Common/UTF8Helpers.cpp
@@ -2,6 +2,7 @@
#include
#include
+#include <bit>
namespace DB
@@ -124,7 +125,7 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
if (non_regular_width_mask)
{
- auto num_regular_chars = __builtin_ctz(non_regular_width_mask);
+ auto num_regular_chars = std::countr_zero(non_regular_width_mask);
width += num_regular_chars;
i += num_regular_chars;
break;
diff --git a/src/Common/UTF8Helpers.h b/src/Common/UTF8Helpers.h
index 72bdb965789..ce90af3d5ce 100644
--- a/src/Common/UTF8Helpers.h
+++ b/src/Common/UTF8Helpers.h
@@ -83,7 +83,7 @@ inline size_t countCodePoints(const UInt8 * data, size_t size)
const auto threshold = vdupq_n_s8(0xBF);
for (; data < src_end_sse; data += bytes_sse)
- res += __builtin_popcountll(get_nibble_mask(vcgtq_s8(vld1q_s8(reinterpret_cast<const int8_t *>(data)), threshold)));
+ res += std::popcount(get_nibble_mask(vcgtq_s8(vld1q_s8(reinterpret_cast<const int8_t *>(data)), threshold)));
res >>= 2;
#endif
diff --git a/src/Common/examples/average.cpp b/src/Common/examples/average.cpp
index 5a42d8c5020..d2802717fc8 100644
--- a/src/Common/examples/average.cpp
+++ b/src/Common/examples/average.cpp
@@ -1,5 +1,6 @@
#include
#include
+#include <bit>
#include
@@ -561,7 +562,7 @@ int main(int argc, char ** argv)
/// Fill source data
for (size_t i = 0; i < size; ++i)
{
- keys[i] = __builtin_ctz(i + 1); /// Make keys to have just slightly more realistic distribution.
+ keys[i] = std::countr_zero(i + 1); /// Make keys to have just slightly more realistic distribution.
values[i] = 1234.5; /// The distribution of values does not affect execution speed.
}
diff --git a/src/Common/memcmpSmall.h b/src/Common/memcmpSmall.h
index 7b977a4a23c..e95a21b836d 100644
--- a/src/Common/memcmpSmall.h
+++ b/src/Common/memcmpSmall.h
@@ -1,6 +1,7 @@
#pragma once
#include
+#include <bit>
#include
#include
@@ -50,7 +51,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= min_size)
break;
@@ -82,7 +83,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= min_size)
break;
@@ -123,7 +124,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= max_size)
return 0;
@@ -150,7 +151,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= size)
return 0;
@@ -180,7 +181,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return offset >= a_size;
}
}
@@ -203,7 +204,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return detail::cmp(a[offset], b[offset]);
}
}
@@ -222,7 +223,7 @@ inline int memcmp16(const Char * a, const Char * b)
if (mask)
{
- auto offset = __builtin_ctz(mask);
+ auto offset = std::countr_zero(mask);
return detail::cmp(a[offset], b[offset]);
}
@@ -252,7 +253,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return offset >= size;
}
}
@@ -285,7 +286,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= min_size)
break;
@@ -317,7 +318,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= min_size)
break;
@@ -359,7 +360,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= max_size)
return 0;
@@ -386,7 +387,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
if (offset >= size)
return 0;
@@ -416,7 +417,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return offset >= a_size;
}
}
@@ -439,7 +440,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return detail::cmp(a[offset], b[offset]);
}
}
@@ -459,7 +460,7 @@ inline int memcmp16(const Char * a, const Char * b)
if (mask)
{
- auto offset = __builtin_ctz(mask);
+ auto offset = std::countr_zero(mask);
return detail::cmp(a[offset], b[offset]);
}
@@ -490,7 +491,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
if (mask)
{
- offset += __builtin_ctz(mask);
+ offset += std::countr_zero(mask);
return offset >= size;
}
}
@@ -523,7 +524,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
if (offset >= min_size)
break;
@@ -548,7 +549,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
if (offset >= min_size)
break;
@@ -589,7 +590,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
if (offset >= max_size)
return 0;
@@ -611,7 +612,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
if (offset >= size)
return 0;
@@ -637,7 +638,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
return offset >= a_size;
}
}
@@ -656,7 +657,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
return detail::cmp(a[offset], b[offset]);
}
}
@@ -672,7 +673,7 @@ inline int memcmp16(const Char * a, const Char * b)
mask = ~mask;
if (mask)
{
- auto offset = __builtin_ctzll(mask) >> 2;
+ auto offset = std::countr_zero(mask) >> 2;
return detail::cmp(a[offset], b[offset]);
}
return 0;
@@ -694,7 +695,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
if (mask)
{
- offset += __builtin_ctzll(mask) >> 2;
+ offset += std::countr_zero(mask) >> 2;
return offset >= size;
}
}
diff --git a/src/Compression/CompressedReadBufferBase.cpp b/src/Compression/CompressedReadBufferBase.cpp
index 2c85dc6d9a9..9101caf568e 100644
--- a/src/Compression/CompressedReadBufferBase.cpp
+++ b/src/Compression/CompressedReadBufferBase.cpp
@@ -1,5 +1,6 @@
#include "CompressedReadBufferBase.h"
+#include <bit>
#include
#include
#include
@@ -93,8 +94,8 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
}
/// Check if the difference caused by single bit flip in stored checksum.
- size_t difference = __builtin_popcountll(expected_checksum.first ^ calculated_checksum.first)
- + __builtin_popcountll(expected_checksum.second ^ calculated_checksum.second);
+ size_t difference = std::popcount(expected_checksum.first ^ calculated_checksum.first)
+ + std::popcount(expected_checksum.second ^ calculated_checksum.second);
if (difference == 1)
{
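
The heuristic works because two values that differ in exactly one bit XOR to a power of two, so the popcount of the XOR across both 64-bit halves of the checksum is 1. An illustrative check with hypothetical values (not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t stored     = 0xDEADBEEFCAFEF00DULL;
    uint64_t calculated = stored ^ (1ULL << 17); // corrupt a single bit

    // Exactly one differing bit => popcount of the XOR is 1, so the reader can
    // report "single bit flip" instead of a generic checksum mismatch.
    assert(std::popcount(stored ^ calculated) == 1);
}
```
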
diff --git a/src/Compression/CompressionCodecT64.cpp b/src/Compression/CompressionCodecT64.cpp
index 0efa98fbd82..9ed37c2d676 100644
--- a/src/Compression/CompressionCodecT64.cpp
+++ b/src/Compression/CompressionCodecT64.cpp
@@ -8,6 +8,7 @@
#include
#include
#include
+#include <bit>
namespace DB
@@ -413,7 +414,7 @@ UInt32 getValuableBitsNumber(UInt64 min, UInt64 max)
{
UInt64 diff_bits = min ^ max;
if (diff_bits)
- return 64 - __builtin_clzll(diff_bits);
+ return 64 - std::countl_zero(diff_bits);
return 0;
}
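
`getValuableBitsNumber` measures how many low-order bits actually vary between the bounds: the XOR of `min` and `max` has its highest set bit at the first position where they diverge. An illustrative check (not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t min = 1000, max = 1023;

    // min ^ max == 23 (0b10111): the bounds first diverge at bit 4, so at most
    // the low 5 bits vary across the range and T64 only needs to store those.
    assert((64 - std::countl_zero(min ^ max)) == 5);
}
```
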
diff --git a/src/Core/Settings.h b/src/Core/Settings.h
index 4e88a46dc53..51197022908 100644
--- a/src/Core/Settings.h
+++ b/src/Core/Settings.h
@@ -344,7 +344,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(UInt64, max_temporary_non_const_columns, 0, "", 0) \
\
M(UInt64, max_subquery_depth, 100, "", 0) \
- M(UInt64, max_pipeline_depth, 10000, "", 0) \
+ M(UInt64, max_pipeline_depth, 1000, "", 0) \
M(UInt64, max_ast_depth, 1000, "Maximum depth of query syntax tree. Checked after parsing.", 0) \
M(UInt64, max_ast_elements, 50000, "Maximum size of query syntax tree in number of nodes. Checked after parsing.", 0) \
M(UInt64, max_expanded_ast_elements, 500000, "Maximum size of query syntax tree in number of nodes after expansion of aliases and the asterisk.", 0) \
diff --git a/src/Dictionaries/SSDCacheDictionaryStorage.h b/src/Dictionaries/SSDCacheDictionaryStorage.h
index 459c4c44668..5f73352a4c9 100644
--- a/src/Dictionaries/SSDCacheDictionaryStorage.h
+++ b/src/Dictionaries/SSDCacheDictionaryStorage.h
@@ -34,6 +34,8 @@ namespace ProfileEvents
extern const Event AIOWriteBytes;
extern const Event AIORead;
extern const Event AIOReadBytes;
+ extern const Event FileSync;
+ extern const Event FileSyncElapsedMicroseconds;
}
namespace DB
@@ -544,6 +546,9 @@ public:
file_path,
std::to_string(bytes_written));
+ ProfileEvents::increment(ProfileEvents::FileSync);
+
+ Stopwatch watch;
#if defined(OS_DARWIN)
if (::fsync(file.fd) < 0)
throwFromErrnoWithPath("Cannot fsync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
@@ -551,6 +556,7 @@ public:
if (::fdatasync(file.fd) < 0)
throwFromErrnoWithPath("Cannot fdatasync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
#endif
+ ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds());
current_block_index += buffer_size_in_blocks;
diff --git a/src/Disks/LocalDirectorySyncGuard.cpp b/src/Disks/LocalDirectorySyncGuard.cpp
index 2610cd7c37f..843d1c1ed43 100644
--- a/src/Disks/LocalDirectorySyncGuard.cpp
+++ b/src/Disks/LocalDirectorySyncGuard.cpp
@@ -1,6 +1,8 @@
#include
+#include <Common/ProfileEvents.h>
#include
#include
+#include <Common/Stopwatch.h>
#include // O_RDWR
/// OSX does not have O_DIRECTORY
@@ -8,6 +10,12 @@
#define O_DIRECTORY O_RDWR
#endif
+namespace ProfileEvents
+{
+ extern const Event DirectorySync;
+ extern const Event DirectorySyncElapsedMicroseconds;
+}
+
namespace DB
{
@@ -29,8 +37,12 @@ LocalDirectorySyncGuard::LocalDirectorySyncGuard(const String & full_path)
LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
{
+ ProfileEvents::increment(ProfileEvents::DirectorySync);
+
try
{
+ Stopwatch watch;
+
#if defined(OS_DARWIN)
if (fcntl(fd, F_FULLFSYNC, 0))
throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC);
@@ -40,6 +52,8 @@ LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
#endif
if (-1 == ::close(fd))
throw Exception("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
+
+ ProfileEvents::increment(ProfileEvents::DirectorySyncElapsedMicroseconds, watch.elapsedMicroseconds());
}
catch (...)
{
diff --git a/src/Functions/FunctionsBitToArray.cpp b/src/Functions/FunctionsBitToArray.cpp
index 22a56ba35e6..f154884a0fd 100644
--- a/src/Functions/FunctionsBitToArray.cpp
+++ b/src/Functions/FunctionsBitToArray.cpp
@@ -8,6 +8,7 @@
#include
#include
#include
+#include <bit>
namespace DB
@@ -285,7 +286,7 @@ public:
{
while (x)
{
- result_array_values_data.push_back(getTrailingZeroBitsUnsafe(x));
+ result_array_values_data.push_back(std::countr_zero(x));
x &= (x - 1);
}
}
diff --git a/src/Functions/bitHammingDistance.cpp b/src/Functions/bitHammingDistance.cpp
index 2090d17432c..75928c2a8af 100644
--- a/src/Functions/bitHammingDistance.cpp
+++ b/src/Functions/bitHammingDistance.cpp
@@ -1,5 +1,6 @@
#include
#include
+#include <bit>
namespace DB
{
@@ -14,7 +15,7 @@ struct BitHammingDistanceImpl
static inline NO_SANITIZE_UNDEFINED Result apply(A a, B b)
{
UInt64 res = static_cast<UInt64>(a) ^ static_cast<UInt64>(b);
- return __builtin_popcountll(res);
+ return std::popcount(res);
}
#if USE_EMBEDDED_COMPILER
diff --git a/src/IO/ReadHelpers.cpp b/src/IO/ReadHelpers.cpp
index c2b0a0f65d7..fb5d0b9aea4 100644
--- a/src/IO/ReadHelpers.cpp
+++ b/src/IO/ReadHelpers.cpp
@@ -10,6 +10,7 @@
#include
#include
#include
+#include <bit>
#ifdef __SSE2__
#include
@@ -698,7 +699,7 @@ void readCSVStringInto(Vector & s, ReadBuffer & buf, const FormatSettings::CSV &
uint16_t bit_mask = _mm_movemask_epi8(eq);
if (bit_mask)
{
- next_pos += __builtin_ctz(bit_mask);
+ next_pos += std::countr_zero(bit_mask);
return;
}
}
@@ -716,7 +717,7 @@ void readCSVStringInto(Vector & s, ReadBuffer & buf, const FormatSettings::CSV &
uint64_t bit_mask = get_nibble_mask(eq);
if (bit_mask)
{
- next_pos += __builtin_ctzll(bit_mask) >> 2;
+ next_pos += std::countr_zero(bit_mask) >> 2;
return;
}
}
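
The CSV scan above is the standard movemask-plus-trailing-zeros pattern: compare 16 bytes at once, then convert the per-byte equality bitmask into the offset of the first match. A self-contained sketch assuming an SSE2 target (names are illustrative, not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstring>
#include <emmintrin.h>

// Returns the offset of the first `needle` byte within 16 bytes, or 16 if absent.
static size_t findByte16(const char * data, char needle)
{
    __m128i chunk = _mm_loadu_si128(reinterpret_cast<const __m128i *>(data));
    __m128i eq = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(needle));
    unsigned mask = _mm_movemask_epi8(eq); // one bit per byte position
    return mask ? std::countr_zero(mask) : 16;
}

int main()
{
    char buf[16];
    std::memcpy(buf, "abc,def,ghi,jkl,", 16);
    assert(findByte16(buf, ',') == 3);
}
```
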
diff --git a/src/IO/WriteBufferFromFileDescriptor.cpp b/src/IO/WriteBufferFromFileDescriptor.cpp
index 25acb71596a..ba49c16c11f 100644
--- a/src/IO/WriteBufferFromFileDescriptor.cpp
+++ b/src/IO/WriteBufferFromFileDescriptor.cpp
@@ -18,6 +18,8 @@ namespace ProfileEvents
extern const Event WriteBufferFromFileDescriptorWriteFailed;
extern const Event WriteBufferFromFileDescriptorWriteBytes;
extern const Event DiskWriteElapsedMicroseconds;
+ extern const Event FileSync;
+ extern const Event FileSyncElapsedMicroseconds;
}
namespace CurrentMetrics
@@ -113,12 +115,18 @@ void WriteBufferFromFileDescriptor::sync()
/// If buffer has pending data - write it.
next();
+ ProfileEvents::increment(ProfileEvents::FileSync);
+
+ Stopwatch watch;
+
/// Request OS to sync data with storage medium.
#if defined(OS_DARWIN)
int res = ::fsync(fd);
#else
int res = ::fdatasync(fd);
#endif
+ ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds());
+
if (-1 == res)
throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC);
}
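
A platform note on what the new `FileSyncElapsedMicroseconds` timer measures here: on Linux the call is `fdatasync`, which flushes file data but may skip unchanged metadata, while on Darwin it is plain `fsync`; macOS `fsync` does not give the same durability guarantee as `fcntl(F_FULLFSYNC)`, which is what `LocalDirectorySyncGuard` above uses.
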
diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index b91fd7ac5cf..3a2bc830cd5 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1513,8 +1513,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
}
auto res = split(split_nodes);
- /// Do not remove array joined columns if they are not used.
- /// res.first->project_input = false;
+ res.second->project_input = project_input;
return res;
}
diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp
index 8d91375e661..fd802a49008 100644
--- a/src/Interpreters/ConcurrentHashJoin.cpp
+++ b/src/Interpreters/ConcurrentHashJoin.cpp
@@ -33,7 +33,7 @@ static UInt32 toPowerOfTwo(UInt32 x)
{
if (x <= 1)
return 1;
- return static_cast<UInt32>(1) << (32 - __builtin_clz(x - 1));
+ return static_cast<UInt32>(1) << (32 - std::countl_zero(x - 1));
}
ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<TableJoin> table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_)
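
`toPowerOfTwo` rounds its argument up to the next power of two: subtracting one and taking `32 - countl_zero` yields the required bit width. A quick check of the edge cases; note that C++20's `std::bit_ceil` expresses the same operation directly (sketch, not part of the commit):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

static uint32_t toPowerOfTwo(uint32_t x)
{
    if (x <= 1)
        return 1;
    return static_cast<uint32_t>(1) << (32 - std::countl_zero(x - 1));
}

int main()
{
    assert(toPowerOfTwo(0) == 1);
    assert(toPowerOfTwo(5) == 8); // rounds up
    assert(toPowerOfTwo(8) == 8); // powers of two map to themselves
    assert(toPowerOfTwo(5) == std::bit_ceil(5u));
}
```
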
diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h
index 68947d140ff..cf508c7bfdb 100644
--- a/src/Interpreters/Context.h
+++ b/src/Interpreters/Context.h
@@ -367,27 +367,6 @@ public:
// Top-level OpenTelemetry trace context for the query. Makes sense only for a query context.
OpenTelemetryTraceContext query_trace_context;
- /// Some counters for current query execution.
- /// Most of them are workarounds and should be removed in the future.
- struct KitchenSink
- {
- std::atomic<size_t> analyze_counter = 0;
-
- KitchenSink() = default;
-
- KitchenSink(const KitchenSink & rhs)
- : analyze_counter(rhs.analyze_counter.load())
- {}
-
- KitchenSink & operator=(const KitchenSink & rhs)
- {
- analyze_counter = rhs.analyze_counter.load();
- return *this;
- }
- };
-
- KitchenSink kitchen_sink;
-
private:
using SampleBlockCache = std::unordered_map<std::string, Block>;
mutable SampleBlockCache sample_block_cache;
diff --git a/src/Interpreters/ITokenExtractor.cpp b/src/Interpreters/ITokenExtractor.cpp
index 8c1af130f71..9c4027dfa0a 100644
--- a/src/Interpreters/ITokenExtractor.cpp
+++ b/src/Interpreters/ITokenExtractor.cpp
@@ -4,6 +4,7 @@
#include
#include
+#include <bit>
#if defined(__SSE2__)
#include
@@ -122,7 +123,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
'\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0');
// Every bit represents if `haystack` character is in the ranges (1) or not (0)
- const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES));
+ const unsigned result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES));
#else
// NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8).
const auto number_begin = _mm_set1_epi8('0' - 1);
@@ -136,7 +137,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
// every bit represents if `haystack` character `c` satisfies condition:
// (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1)
// < 0 since _mm_cmplt_epi8 threats chars as SIGNED, and so all chars > 0x80 are negative.
- const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128(
+ const unsigned result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128(
_mm_cmplt_epi8(haystack, zero),
_mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end))),
_mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))),
@@ -152,7 +153,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
continue;
}
- const auto token_start_pos_in_current_haystack = getTrailingZeroBitsUnsafe(result_bitmask);
+ const auto token_start_pos_in_current_haystack = std::countr_zero(result_bitmask);
if (*token_length == 0)
// new token
*token_start = *pos + token_start_pos_in_current_haystack;
@@ -160,7 +161,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
// end of token starting in one of previous haystacks
return true;
- const auto token_bytes_in_current_haystack = getTrailingZeroBitsUnsafe(~(result_bitmask >> token_start_pos_in_current_haystack));
+ const auto token_bytes_in_current_haystack = std::countr_zero(~(result_bitmask >> token_start_pos_in_current_haystack));
*token_length += token_bytes_in_current_haystack;
*pos += token_start_pos_in_current_haystack + token_bytes_in_current_haystack;
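
The `int` to `unsigned` change for `result_bitmask` is required rather than cosmetic: `std::countr_zero` is constrained to unsigned integer types, and complementing or shifting a plain `int` bitmask (as in `~(result_bitmask >> token_start_pos_in_current_haystack)` above) risks sign extension, while `_mm_cvtsi128_si32` and `_mm_movemask_epi8` both return `int`.
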
diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp
index 7a217a061e1..3af95fd77b8 100644
--- a/src/Interpreters/InterpreterSelectQuery.cpp
+++ b/src/Interpreters/InterpreterSelectQuery.cpp
@@ -98,7 +98,6 @@ namespace ErrorCodes
extern const int SAMPLING_NOT_SUPPORTED;
extern const int ILLEGAL_FINAL;
extern const int ILLEGAL_PREWHERE;
- extern const int TOO_DEEP_PIPELINE;
extern const int TOO_MANY_COLUMNS;
extern const int LOGICAL_ERROR;
extern const int NOT_IMPLEMENTED;
@@ -499,14 +498,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
auto analyze = [&] (bool try_move_to_prewhere)
{
- if (context->hasQueryContext())
- {
- std::atomic<size_t> & current_query_analyze_count = context->getQueryContext()->kitchen_sink.analyze_counter;
- ++current_query_analyze_count;
- if (settings.max_pipeline_depth && current_query_analyze_count >= settings.max_pipeline_depth)
- throw DB::Exception(ErrorCodes::TOO_DEEP_PIPELINE, "Query analyze overflow. Try to increase `max_pipeline_depth` or simplify the query");
- }
-
/// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it.
ASTPtr view_table;
if (view)
@@ -645,7 +636,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
analyze(shouldMoveToPrewhere());
bool need_analyze_again = false;
-
if (analysis_result.prewhere_constant_filter_description.always_false || analysis_result.prewhere_constant_filter_description.always_true)
{
if (analysis_result.prewhere_constant_filter_description.always_true)
@@ -654,7 +644,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
query.setExpression(ASTSelectQuery::Expression::PREWHERE, std::make_shared(0u));
need_analyze_again = true;
}
-
if (analysis_result.where_constant_filter_description.always_false || analysis_result.where_constant_filter_description.always_true)
{
if (analysis_result.where_constant_filter_description.always_true)
diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp
index ab443f58cf2..af05b33c1f6 100644
--- a/src/Interpreters/Set.cpp
+++ b/src/Interpreters/Set.cpp
@@ -430,8 +430,9 @@ MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<KeyTuplePositionMapping> && indexes_mapping_)
- block_to_sort.insert({ordered_set[i], nullptr, ordered_set[i]->getName()});
- sort_description.emplace_back(ordered_set[i]->getName(), 1, 1);
+ String column_name = "_" + toString(i);
+ block_to_sort.insert({ordered_set[i], nullptr, column_name});
+ sort_description.emplace_back(column_name, 1, 1);
}
sortBlock(block_to_sort, sort_description);
diff --git a/src/Parsers/ExpressionListParsers.cpp b/src/Parsers/ExpressionListParsers.cpp
index caf9be1fea6..ea51367ee5d 100644
--- a/src/Parsers/ExpressionListParsers.cpp
+++ b/src/Parsers/ExpressionListParsers.cpp
@@ -1,6 +1,7 @@
#include
#include
+#include
#include
#include
@@ -9,6 +10,7 @@
#include
#include
#include
+#include <Parsers/ParserSetQuery.h>
#include
#include
#include
@@ -603,6 +605,13 @@ bool ParserTableFunctionExpression::parseImpl(Pos & pos, ASTPtr & node, Expected
{
if (ParserTableFunctionView().parse(pos, node, expected))
return true;
+ ParserKeyword s_settings("SETTINGS");
+ if (s_settings.ignore(pos, expected))
+ {
+ ParserSetQuery parser_settings(true);
+ if (parser_settings.parse(pos, node, expected))
+ return true;
+ }
return elem_parser.parse(pos, node, expected);
}
diff --git a/src/Storages/IStorage.h b/src/Storages/IStorage.h
index 03e10f98b49..a61bfeaff57 100644
--- a/src/Storages/IStorage.h
+++ b/src/Storages/IStorage.h
@@ -282,7 +282,7 @@ public:
*
* SelectQueryInfo is required since the stage can depend on the query
* (see Distributed() engine and optimize_skip_unused_shards,
- * see also MergeTree engine and allow_experimental_projection_optimization).
+ * see also MergeTree engine and projection optimization).
* And to store optimized cluster (after optimize_skip_unused_shards).
* It will also store needed stuff for projection query pipeline.
*
diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp
index a10192c4cc1..79854785016 100644
--- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp
+++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp
@@ -8,6 +8,7 @@
#include
#include
#include
+#include <bit>
#ifdef __SSE2__
#include
@@ -473,7 +474,7 @@ size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
count += 64;
else
{
- count += __builtin_clzll(val);
+ count += std::countl_zero(val);
return count;
}
}
@@ -507,7 +508,7 @@ size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
count += 64;
else
{
- count += __builtin_clzll(val);
+ count += std::countl_zero(val);
return count;
}
}
@@ -531,7 +532,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
size_t count = 0;
-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
const __m128i zero16 = _mm_setzero_si128();
while (end - begin >= 64)
{
@@ -555,7 +556,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
count += 64;
else
{
- count += __builtin_clzll(val);
+ count += std::countl_zero(val);
return count;
}
}
@@ -583,7 +584,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
count += 64;
else
{
- count += __builtin_clzll(val);
+ count += std::countl_zero(val);
return count;
}
}
diff --git a/src/TableFunctions/TableFunctionExecutable.cpp b/src/TableFunctions/TableFunctionExecutable.cpp
index b84008f5ac8..7489b91659c 100644
--- a/src/TableFunctions/TableFunctionExecutable.cpp
+++ b/src/TableFunctions/TableFunctionExecutable.cpp
@@ -4,7 +4,10 @@
#include
#include
#include
+#include
#include
+#include <Parsers/ASTSetQuery.h>
+#include
#include
#include
#include
@@ -48,7 +51,7 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex
std::vector<String> script_name_with_arguments;
boost::split(script_name_with_arguments, script_name_with_arguments_value, [](char c){ return c == ' '; });
- script_name = script_name_with_arguments[0];
+ script_name = std::move(script_name_with_arguments[0]);
script_name_with_arguments.erase(script_name_with_arguments.begin());
arguments = std::move(script_name_with_arguments);
format = checkAndGetLiteralArgument(args[1], "format");
@@ -56,14 +59,26 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex
for (size_t i = 3; i < args.size(); ++i)
{
- ASTPtr query = args[i]->children.at(0);
- if (!query->as<ASTSelectWithUnionQuery>())
- throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
- "Table function '{}' argument is invalid input query {}",
- getName(),
- query->formatForErrorMessage());
-
- input_queries.emplace_back(std::move(query));
+ if (args[i]->as<ASTSetQuery>())
+ {
+ settings_query = std::move(args[i]);
+ }
+ else
+ {
+ ASTPtr query = args[i]->children.at(0);
+ if (query->as<ASTSelectWithUnionQuery>())
+ {
+ input_queries.emplace_back(std::move(query));
+ }
+ else
+ {
+ throw Exception(
+ ErrorCodes::UNSUPPORTED_METHOD,
+ "Table function '{}' argument is invalid {}",
+ getName(),
+ args[i]->formatForErrorMessage());
+ }
+ }
}
}
@@ -79,6 +94,8 @@ StoragePtr TableFunctionExecutable::executeImpl(const ASTPtr & /*ast_function*/,
ExecutableSettings settings;
settings.script_name = script_name;
settings.script_arguments = arguments;
+ if (settings_query != nullptr)
+ settings.applyChanges(settings_query->as<ASTSetQuery>()->changes);
auto storage = std::make_shared<StorageExecutable>(storage_id, format, settings, input_queries, getActualTableStructure(context), ConstraintsDescription{});
storage->startup();
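
Note: the SETTINGS argument captured during parsing is applied onto ExecutableSettings via applyChanges() before StorageExecutable is created, so per-query overrides reach the script runner. The user-visible form, mirroring the integration test added later in this patch (values are the test's, not defaults):

    SELECT *
    FROM executable('input_slow.py', 'TabSeparated', 'value String',
                    (SELECT 1),
                    SETTINGS command_termination_timeout = 26,
                             command_read_timeout = 26000,
                             command_write_timeout = 26000);
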
diff --git a/src/TableFunctions/TableFunctionExecutable.h b/src/TableFunctions/TableFunctionExecutable.h
index 128ee8e46fc..820da077ca2 100644
--- a/src/TableFunctions/TableFunctionExecutable.h
+++ b/src/TableFunctions/TableFunctionExecutable.h
@@ -6,6 +6,7 @@ namespace DB
{
class Context;
+class ASTSetQuery;
/* executable(script_name_optional_arguments, format, structure, input_query) - creates a temporary storage from executable file
*
@@ -32,5 +33,6 @@ private:
String format;
String structure;
std::vector<ASTPtr> input_queries;
+ ASTPtr settings_query = nullptr;
};
}
diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py
index 3976e2ba916..488fd1bbb34 100644
--- a/tests/ci/build_check.py
+++ b/tests/ci/build_check.py
@@ -39,7 +39,7 @@ def _can_export_binaries(build_config: BuildConfig) -> bool:
return False
if build_config["bundled"] != "bundled":
return False
- if build_config["splitted"] == "splitted":
+ if build_config["libraries"] == "shared":
return False
if build_config["sanitizer"] != "":
return True
@@ -68,8 +68,8 @@ def get_packager_cmd(
cmd += f" --build-type={build_config['build_type']}"
if build_config["sanitizer"]:
cmd += f" --sanitizer={build_config['sanitizer']}"
- if build_config["splitted"] == "splitted":
- cmd += " --split-binary"
+ if build_config["libraries"] == "shared":
+ cmd += " --shared-libraries"
if build_config["tidy"] == "enable":
cmd += " --clang-tidy"
diff --git a/tests/ci/build_report_check.py b/tests/ci/build_report_check.py
index 4bb7a619b9f..4246673de3e 100644
--- a/tests/ci/build_report_check.py
+++ b/tests/ci/build_report_check.py
@@ -37,7 +37,7 @@ class BuildResult:
build_type,
sanitizer,
bundled,
- splitted,
+ libraries,
status,
elapsed_seconds,
with_coverage,
@@ -46,7 +46,7 @@ class BuildResult:
self.build_type = build_type
self.sanitizer = sanitizer
self.bundled = bundled
- self.splitted = splitted
+ self.libraries = libraries
self.status = status
self.elapsed_seconds = elapsed_seconds
self.with_coverage = with_coverage
@@ -91,7 +91,7 @@ def get_failed_report(
build_type="unknown",
sanitizer="unknown",
bundled="unknown",
- splitted="unknown",
+ libraries="unknown",
status=message,
elapsed_seconds=0,
with_coverage=False,
@@ -108,7 +108,7 @@ def process_report(
build_type=build_config["build_type"],
sanitizer=build_config["sanitizer"],
bundled=build_config["bundled"],
- splitted=build_config["splitted"],
+ libraries=build_config["libraries"],
status="success" if build_report["status"] else "failure",
elapsed_seconds=build_report["elapsed_seconds"],
with_coverage=False,
diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py
index 334a24ed7af..5382106d26b 100644
--- a/tests/ci/cherry_pick.py
+++ b/tests/ci/cherry_pick.py
@@ -206,7 +206,8 @@ Merge it only if you intend to backport changes to the target branch, otherwise
)
self.cherrypick_pr.add_to_labels(Labels.LABEL_CHERRYPICK)
self.cherrypick_pr.add_to_labels(Labels.LABEL_DO_NOT_TEST)
- self.cherrypick_pr.add_to_assignees(self.pr.assignee)
+ if self.pr.assignee is not None:
+ self.cherrypick_pr.add_to_assignees(self.pr.assignee)
self.cherrypick_pr.add_to_assignees(self.pr.user)
def create_backport(self):
@@ -238,7 +239,8 @@ Merge it only if you intend to backport changes to the target branch, otherwise
head=self.backport_branch,
)
self.backport_pr.add_to_labels(Labels.LABEL_BACKPORT)
- self.backport_pr.add_to_assignees(self.pr.assignee)
+ if self.pr.assignee is not None:
+ self.backport_pr.add_to_assignees(self.pr.assignee)
self.backport_pr.add_to_assignees(self.pr.user)
@property
diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py
index 10db5d05ad4..5f2832156cb 100644
--- a/tests/ci/ci_config.py
+++ b/tests/ci/ci_config.py
@@ -14,7 +14,7 @@ CI_CONFIG = {
"package_type": "deb",
"static_binary_name": "amd64",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"additional_pkgs": True,
"tidy": "disable",
"with_coverage": False,
@@ -25,7 +25,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "coverity",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
"official": False,
@@ -37,7 +37,7 @@ CI_CONFIG = {
"package_type": "deb",
"static_binary_name": "aarch64",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"additional_pkgs": True,
"tidy": "disable",
"with_coverage": False,
@@ -48,7 +48,7 @@ CI_CONFIG = {
"sanitizer": "address",
"package_type": "deb",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -58,7 +58,7 @@ CI_CONFIG = {
"sanitizer": "undefined",
"package_type": "deb",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -68,7 +68,7 @@ CI_CONFIG = {
"sanitizer": "thread",
"package_type": "deb",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -78,7 +78,7 @@ CI_CONFIG = {
"sanitizer": "memory",
"package_type": "deb",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -88,7 +88,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "deb",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -98,7 +98,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -109,17 +109,17 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "debug-amd64",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "enable",
"with_coverage": False,
},
- "binary_splitted": {
+ "binary_shared": {
"compiler": "clang-14",
"build_type": "",
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
- "splitted": "splitted",
+ "libraries": "shared",
"tidy": "disable",
"with_coverage": False,
},
@@ -130,7 +130,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "macos",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -140,7 +140,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -151,7 +151,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "freebsd",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -162,7 +162,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "macos-aarch64",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -173,7 +173,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "powerpc64le",
"bundled": "bundled",
- "splitted": "unsplitted",
+ "libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@@ -192,7 +192,7 @@ CI_CONFIG = {
],
"ClickHouse special build check": [
"binary_tidy",
- "binary_splitted",
+ "binary_shared",
"binary_darwin",
"binary_aarch64",
"binary_freebsd",
@@ -297,7 +297,7 @@ CI_CONFIG = {
"required_build": "package_release",
},
"Split build smoke test": {
- "required_build": "binary_splitted",
+ "required_build": "binary_shared",
},
"Unit tests (release-clang)": {
"required_build": "binary_release",
diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py
index c595dc559df..a81334860d1 100644
--- a/tests/ci/clickhouse_helper.py
+++ b/tests/ci/clickhouse_helper.py
@@ -32,7 +32,18 @@ class ClickHouseHelper:
}
for i in range(5):
- response = requests.post(url, params=params, data=json_str, headers=auth)
+ try:
+ response = requests.post(
+ url, params=params, data=json_str, headers=auth
+ )
+ except Exception as e:
+ logging.warning(
+ "Received exception while sending data to %s on %s attempt: %s",
+ url,
+ i,
+ e,
+ )
+ continue
logging.info("Response content '%s'", response.content)
diff --git a/tests/ci/report.py b/tests/ci/report.py
index 83d89e628a2..d924be5885b 100644
--- a/tests/ci/report.py
+++ b/tests/ci/report.py
@@ -290,7 +290,7 @@ tr:hover td {{filter: brightness(95%);}}
<th>Build type</th>
<th>Sanitizer</th>
<th>Bundled</th>
-<th>Splitted</th>
+<th>Libraries</th>
<th>Status</th>
<th>Build log</th>
<th>Build time</th>
@@ -335,7 +335,7 @@ def create_build_html_report(
row += "{} | ".format("none")
row += "{} | ".format(build_result.bundled)
- row += "{} | ".format(build_result.splitted)
+ row += "{} | ".format(build_result.libraries)
if build_result.status:
style = _get_status_style(build_result.status)
diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json
index 8587e8d4120..b5de1d67cbc 100644
--- a/tests/integration/parallel_skip.json
+++ b/tests/integration/parallel_skip.json
@@ -46,5 +46,19 @@
"test_storage_s3/test.py::test_url_reconnect_in_the_middle",
"test_system_metrics/test.py::test_readonly_metrics",
"test_system_replicated_fetches/test.py::test_system_replicated_fetches",
- "test_zookeeper_config_load_balancing/test.py::test_round_robin"
+ "test_zookeeper_config_load_balancing/test.py::test_round_robin",
+
+ "test_tlsv1_3/test.py::test_https",
+ "test_tlsv1_3/test.py::test_https_wrong_cert",
+ "test_tlsv1_3/test.py::test_https_non_ssl_auth",
+ "test_tlsv1_3/test.py::test_create_user",
+ "test_user_ip_restrictions/test.py::test_ipv4",
+ "test_user_ip_restrictions/test.py::test_ipv6",
+ "test_ssl_cert_authentication/test.py::test_https",
+ "test_ssl_cert_authentication/test.py::test_https_wrong_cert",
+ "test_ssl_cert_authentication/test.py::test_https_non_ssl_auth",
+ "test_ssl_cert_authentication/test.py::test_create_user",
+ "test_grpc_protocol_ssl/test.py::test_secure_channel",
+ "test_grpc_protocol_ssl/test.py::test_insecure_channel",
+ "test_grpc_protocol_ssl/test.py::test_wrong_client_certificate"
]
diff --git a/tests/integration/test_executable_table_function/test.py b/tests/integration/test_executable_table_function/test.py
index e3ac11eef87..801a3c7c14a 100644
--- a/tests/integration/test_executable_table_function/test.py
+++ b/tests/integration/test_executable_table_function/test.py
@@ -163,6 +163,19 @@ def test_executable_function_input_multiple_pipes_python(started_cluster):
assert actual == expected
+def test_executable_function_input_slow_python_timeout_increased(started_cluster):
+ skip_test_msan(node)
+ query = "SELECT * FROM executable('input_slow.py', 'TabSeparated', 'value String', {source}, SETTINGS {settings})"
+ settings = "command_termination_timeout = 26, command_read_timeout = 26000, command_write_timeout = 26000"
+ assert node.query(query.format(source="(SELECT 1)", settings=settings)) == "Key 1\n"
+ assert (
+ node.query(
+ query.format(source="(SELECT id FROM test_data_table)", settings=settings)
+ )
+ == "Key 0\nKey 1\nKey 2\n"
+ )
+
+
def test_executable_storage_no_input_bash(started_cluster):
skip_test_msan(node)
node.query("DROP TABLE IF EXISTS test_table")
diff --git a/tests/integration/test_grpc_protocol_ssl/test.py b/tests/integration/test_grpc_protocol_ssl/test.py
index 60c3ccd7a9d..3cd08e3cd6a 100644
--- a/tests/integration/test_grpc_protocol_ssl/test.py
+++ b/tests/integration/test_grpc_protocol_ssl/test.py
@@ -5,7 +5,8 @@ import grpc
from helpers.cluster import ClickHouseCluster, run_and_check
GRPC_PORT = 9100
-NODE_IP = "10.5.172.77" # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+NODE_IP = "10.5.172.77" # Never copy-paste this line
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"
diff --git a/tests/integration/test_part_moves_between_shards/test.py b/tests/integration/test_part_moves_between_shards/test.py
index 1dbe5324124..8fef44305ea 100644
--- a/tests/integration/test_part_moves_between_shards/test.py
+++ b/tests/integration/test_part_moves_between_shards/test.py
@@ -165,10 +165,7 @@ def test_deduplication_while_move(started_cluster):
assert TSV(
n.query(
"SELECT count() FROM test_deduplication_d",
- settings={
- "allow_experimental_query_deduplication": 1,
- "allow_experimental_projection_optimization": 1,
- },
+ settings={"allow_experimental_query_deduplication": 1},
)
) == TSV("2")
diff --git a/tests/integration/test_ssl_cert_authentication/test.py b/tests/integration/test_ssl_cert_authentication/test.py
index 74bd08e9b35..d210c3e58cc 100644
--- a/tests/integration/test_ssl_cert_authentication/test.py
+++ b/tests/integration/test_ssl_cert_authentication/test.py
@@ -5,7 +5,8 @@ import ssl
import os.path
HTTPS_PORT = 8443
-NODE_IP = "10.5.172.77" # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+NODE_IP = "10.5.172.77" # Never copy-paste this line
NODE_IP_WITH_HTTPS_PORT = NODE_IP + ":" + str(HTTPS_PORT)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/queries/0_stateless/02337_join_analyze_stuck.reference b/tests/integration/test_tcp_handler_interserver_listen_host/__init__.py
similarity index 100%
rename from tests/queries/0_stateless/02337_join_analyze_stuck.reference
rename to tests/integration/test_tcp_handler_interserver_listen_host/__init__.py
diff --git a/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/interserver-listen-host.xml b/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/interserver-listen-host.xml
new file mode 100644
index 00000000000..52d45e7c956
--- /dev/null
+++ b/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/interserver-listen-host.xml
@@ -0,0 +1,3 @@
+<clickhouse>
+    <interserver_listen_host>10.0.0.10</interserver_listen_host>
+</clickhouse>
diff --git a/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/no-interserver-listen-host.xml b/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/no-interserver-listen-host.xml
new file mode 100644
index 00000000000..2e7e1a83aa1
--- /dev/null
+++ b/tests/integration/test_tcp_handler_interserver_listen_host/configs/config.d/no-interserver-listen-host.xml
@@ -0,0 +1,3 @@
+<clickhouse>
+
+</clickhouse>
diff --git a/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py b/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py
new file mode 100644
index 00000000000..44df1c369cf
--- /dev/null
+++ b/tests/integration/test_tcp_handler_interserver_listen_host/test_case.py
@@ -0,0 +1,55 @@
+"""Test Interserver responses on configured IP."""
+from pathlib import Path
+import pytest
+from helpers.cluster import ClickHouseCluster
+import requests
+import socket
+import time
+
+cluster = ClickHouseCluster(__file__)
+
+INTERSERVER_LISTEN_HOST = "10.0.0.10"
+INTERSERVER_HTTP_PORT = 9009
+
+node_with_interserver_listen_host = cluster.add_instance(
+ "node_with_interserver_listen_host",
+ main_configs=["configs/config.d/interserver-listen-host.xml"],
+ ipv4_address=INTERSERVER_LISTEN_HOST, # used to configure the corresponding interface in the test container
+ ipv6_address="2001:3984:3989::1:1000",
+)
+
+node_without_interserver_listen_host = cluster.add_instance(
+ "node_without_interserver_listen_host",
+ main_configs=["configs/config.d/no-interserver-listen-host.xml"],
+ ipv6_address="2001:3984:3989::2:1000",
+)
+
+
+@pytest.fixture(scope="module")
+def start_cluster():
+ try:
+ cluster.start()
+ yield cluster
+
+ finally:
+ cluster.shutdown()
+
+
+def test_request_to_node_with_interserver_listen_host(start_cluster):
+ time.sleep(5) # waiting for interserver listener to start
+ response_interserver = requests.get(
+ f"http://{INTERSERVER_LISTEN_HOST}:{INTERSERVER_HTTP_PORT}"
+ )
+ response_client = requests.get(
+ f"http://{node_without_interserver_listen_host.ip_address}:8123"
+ )
+ assert response_interserver.status_code == 200
+ assert "Ok." in response_interserver.text
+ assert response_client.status_code == 200
+
+
+def test_request_to_node_without_interserver_listen_host(start_cluster):
+ response = requests.get(
+ f"http://{node_without_interserver_listen_host.ip_address}:{INTERSERVER_HTTP_PORT}"
+ )
+ assert response.status_code == 200
diff --git a/tests/integration/test_tlsv1_3/test.py b/tests/integration/test_tlsv1_3/test.py
index 80c9c68eca7..e93774cf11e 100644
--- a/tests/integration/test_tlsv1_3/test.py
+++ b/tests/integration/test_tlsv1_3/test.py
@@ -5,7 +5,8 @@ import ssl
import os.path
HTTPS_PORT = 8443
-NODE_IP = "10.5.172.77" # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
+NODE_IP = "10.5.172.77" # Never copy-paste this line
NODE_IP_WITH_HTTPS_PORT = NODE_IP + ":" + str(HTTPS_PORT)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
diff --git a/tests/integration/test_user_ip_restrictions/test.py b/tests/integration/test_user_ip_restrictions/test.py
index e41febfa2f5..f38064e50d0 100644
--- a/tests/integration/test_user_ip_restrictions/test.py
+++ b/tests/integration/test_user_ip_restrictions/test.py
@@ -8,47 +8,56 @@ node_ipv4 = cluster.add_instance(
"node_ipv4",
main_configs=[],
user_configs=["configs/users_ipv4.xml"],
- ipv4_address="10.5.172.77",
+ ipv4_address="10.5.172.77", # Never copy-paste this line
)
client_ipv4_ok = cluster.add_instance(
- "client_ipv4_ok", main_configs=[], user_configs=[], ipv4_address="10.5.172.10"
+ "client_ipv4_ok",
+ main_configs=[],
+ user_configs=[],
+ ipv4_address="10.5.172.10", # Never copy-paste this line
)
client_ipv4_ok_direct = cluster.add_instance(
- "client_ipv4_ok_direct", main_configs=[], user_configs=[], ipv4_address="10.5.173.1"
+ "client_ipv4_ok_direct",
+ main_configs=[],
+ user_configs=[],
+ ipv4_address="10.5.173.1", # Never copy-paste this line
)
client_ipv4_ok_full_mask = cluster.add_instance(
"client_ipv4_ok_full_mask",
main_configs=[],
user_configs=[],
- ipv4_address="10.5.175.77",
+ ipv4_address="10.5.175.77", # Never copy-paste this line
)
client_ipv4_bad = cluster.add_instance(
- "client_ipv4_bad", main_configs=[], user_configs=[], ipv4_address="10.5.173.10"
+ "client_ipv4_bad",
+ main_configs=[],
+ user_configs=[],
+ ipv4_address="10.5.173.10", # Never copy-paste this line
)
node_ipv6 = cluster.add_instance(
"node_ipv6",
main_configs=["configs/config_ipv6.xml"],
user_configs=["configs/users_ipv6.xml"],
- ipv6_address="2001:3984:3989::1:1000",
+ ipv6_address="2001:3984:3989::1:1000", # Never copy-paste this line
)
client_ipv6_ok = cluster.add_instance(
"client_ipv6_ok",
main_configs=[],
user_configs=[],
- ipv6_address="2001:3984:3989::5555",
+ ipv6_address="2001:3984:3989::5555", # Never copy-paste this line
)
client_ipv6_ok_direct = cluster.add_instance(
"client_ipv6_ok_direct",
main_configs=[],
user_configs=[],
- ipv6_address="2001:3984:3989::1:1111",
+ ipv6_address="2001:3984:3989::1:1111", # Never copy-paste this line
)
client_ipv6_bad = cluster.add_instance(
"client_ipv6_bad",
main_configs=[],
user_configs=[],
- ipv6_address="2001:3984:3989::1:1112",
+ ipv6_address="2001:3984:3989::1:1112", # Never copy-paste this line
)
diff --git a/tests/queries/0_stateless/01881_union_header_mismatch_bug.sql b/tests/queries/0_stateless/01881_union_header_mismatch_bug.sql
index 9a220ffd49f..bf8749fb046 100644
--- a/tests/queries/0_stateless/01881_union_header_mismatch_bug.sql
+++ b/tests/queries/0_stateless/01881_union_header_mismatch_bug.sql
@@ -28,3 +28,8 @@ WHERE number IN
SELECT number
FROM numbers(5)
) order by label, number;
+
+SELECT NULL FROM
+(SELECT [1048575, NULL] AS ax, 2147483648 AS c) t1 ARRAY JOIN ax
+INNER JOIN (SELECT NULL AS c) t2 USING (c);
+
diff --git a/tests/queries/0_stateless/02232_dist_insert_send_logs_level_hung.sh b/tests/queries/0_stateless/02232_dist_insert_send_logs_level_hung.sh
index 322e7e73991..a8dce5cb516 100755
--- a/tests/queries/0_stateless/02232_dist_insert_send_logs_level_hung.sh
+++ b/tests/queries/0_stateless/02232_dist_insert_send_logs_level_hung.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: long, no-parallel
+# Tags: disabled
# Tag: no-parallel - to heavy
# Tag: long - to heavy
@@ -33,14 +33,11 @@ $CLICKHOUSE_CLIENT "${client_opts[@]}" -nm -q "
create materialized view mv_02232 to out_02232 as select * from in_02232;
"
-# 600 is the default timeout of clickhouse-test, and 30 is just a safe padding,
-# to avoid hung query check triggering
-insert_timeout=$((600-30))
-# Increase timeouts to avoid timeout during trying to send Log packet to
-# the remote side, when the socket is full.
insert_client_opts=(
- --send_timeout "$insert_timeout"
- --receive_timeout "$insert_timeout"
+ # Increase timeouts to avoid timeout during trying to send Log packet to
+ # the remote side, when the socket is full.
+ --send_timeout 86400
+ --receive_timeout 86400
)
# 250 seconds is enough to trigger the query hung (even in debug build)
#
diff --git a/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.reference b/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.reference
index 0a83fa24d49..b983dc681ec 100644
--- a/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.reference
+++ b/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.reference
@@ -1,11 +1,11 @@
-- { echoOn }
-select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=0, optimize_read_in_order=0;
+select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=0, optimize_read_in_order=0;
15 480
14 450
13 420
12 390
11 360
-select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, optimize_read_in_order=1;
+select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=1, optimize_read_in_order=1;
15 480
14 450
13 420
diff --git a/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.sql b/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.sql
index be050cc3080..53d6892415b 100644
--- a/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.sql
+++ b/tests/queries/0_stateless/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order.sql
@@ -6,8 +6,8 @@ create table test_agg_proj_02302 (x Int32, y Int32, PROJECTION x_plus_y (select
insert into test_agg_proj_02302 select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100);
-- { echoOn }
-select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=0, optimize_read_in_order=0;
-select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, optimize_read_in_order=1;
+select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=0, optimize_read_in_order=0;
+select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=1, optimize_read_in_order=1;
-- { echoOff }
drop table test_agg_proj_02302;
diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization.reference b/tests/queries/0_stateless/02317_distinct_in_order_optimization.reference
index efc9e28bcce..d827dc33838 100644
--- a/tests/queries/0_stateless/02317_distinct_in_order_optimization.reference
+++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization.reference
@@ -85,11 +85,11 @@ select distinct 1 as x, 2 as y from distinct_in_order order by x;
1 2
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
1 2
-select distinct a, 1 as x from distinct_in_order order by x;
+select a, x from (select distinct a, 1 as x from distinct_in_order order by x) order by a;
0 1
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
0 1 2
-select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
+select a, b, x, y from(select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
0 0 1 2
0 1 1 2
0 2 1 2
@@ -97,10 +97,10 @@ select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
0 4 1 2
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
1 2
-select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
+select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
0 0 1 2
0 1 1 2
0 2 1 2
0 3 1 2
0 4 1 2
--- check that distinct in order has the same result as ordinary distinct
+-- check that distinct in order returns the same result as ordinary distinct
diff --git a/tests/queries/0_stateless/02317_distinct_in_order_optimization.sql b/tests/queries/0_stateless/02317_distinct_in_order_optimization.sql
index 7a70e2ef873..0fb3766edb4 100644
--- a/tests/queries/0_stateless/02317_distinct_in_order_optimization.sql
+++ b/tests/queries/0_stateless/02317_distinct_in_order_optimization.sql
@@ -48,16 +48,16 @@ select '-- distinct with constants columns';
select distinct 1 as x, 2 as y from distinct_in_order;
select distinct 1 as x, 2 as y from distinct_in_order order by x;
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
-select distinct a, 1 as x from distinct_in_order order by x;
+select a, x from (select distinct a, 1 as x from distinct_in_order order by x) order by a;
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
-select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
+select a, b, x, y from(select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
-select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
+select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
-- { echoOff }
drop table if exists distinct_in_order sync;
-select '-- check that distinct in order has the same result as ordinary distinct';
+select '-- check that distinct in order returns the same result as ordinary distinct';
drop table if exists distinct_cardinality_low sync;
CREATE TABLE distinct_cardinality_low (low UInt64, medium UInt64, high UInt64) ENGINE MergeTree() ORDER BY (low, medium);
INSERT INTO distinct_cardinality_low SELECT number % 1e1, number % 1e2, number % 1e3 FROM numbers_mt(1e4);
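
Note: the rewritten queries above fix a flaky expectation: when DISTINCT mixes real columns with constants and the ORDER BY covers only the constant, the relative order of the remaining columns is unspecified. The pattern used in the fix, as a standalone sketch:

    -- Run DISTINCT (with its ORDER BY on the constant) in a subquery,
    -- then impose a deterministic order on the outside.
    SELECT a, x
    FROM (SELECT DISTINCT a, 1 AS x FROM distinct_in_order ORDER BY x)
    ORDER BY a;
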
diff --git a/tests/queries/0_stateless/02337_join_analyze_stuck.sql b/tests/queries/0_stateless/02337_join_analyze_stuck.sql
deleted file mode 100644
index 9bdc418f028..00000000000
--- a/tests/queries/0_stateless/02337_join_analyze_stuck.sql
+++ /dev/null
@@ -1,15 +0,0 @@
--- Tags: long
-
--- https://github.com/ClickHouse/ClickHouse/issues/21557
-
-SET max_pipeline_depth = 1000;
-
-EXPLAIN SYNTAX
-WITH
- x AS ( SELECT number FROM numbers(10) ),
- cross_sales AS (
- SELECT 1 AS xx
- FROM x, x AS d1, x AS d2, x AS d3, x AS d4, x AS d5, x AS d6, x AS d7, x AS d8, x AS d9
- WHERE x.number = d9.number
- )
-SELECT xx FROM cross_sales WHERE xx = 2000; -- { serverError TOO_DEEP_PIPELINE }
diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.reference b/tests/queries/0_stateless/02360_send_logs_level_colors.reference
index 1c30d50f5c0..fe2824243c4 100644
--- a/tests/queries/0_stateless/02360_send_logs_level_colors.reference
+++ b/tests/queries/0_stateless/02360_send_logs_level_colors.reference
@@ -1,2 +1,3 @@
ASCII text
ASCII text
+ASCII text
diff --git a/tests/queries/0_stateless/02360_send_logs_level_colors.sh b/tests/queries/0_stateless/02360_send_logs_level_colors.sh
index eaa294cebe4..4e5ce057702 100755
--- a/tests/queries/0_stateless/02360_send_logs_level_colors.sh
+++ b/tests/queries/0_stateless/02360_send_logs_level_colors.sh
@@ -26,8 +26,6 @@ EOF
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name"
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=$file_name"
-
-# This query may fail due to bug in clickhouse-client.
-# run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"
+run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"
rm -f "$file_name"
diff --git a/tests/queries/0_stateless/02361_fsync_profile_events.reference b/tests/queries/0_stateless/02361_fsync_profile_events.reference
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/queries/0_stateless/02361_fsync_profile_events.sh b/tests/queries/0_stateless/02361_fsync_profile_events.sh
new file mode 100755
index 00000000000..d54da9a49e5
--- /dev/null
+++ b/tests/queries/0_stateless/02361_fsync_profile_events.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+# Tags: no-s3-storage
+# Tag no-s3-storage: s3 does not have fsync
+
+CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CUR_DIR"/../shell_config.sh
+
+$CLICKHOUSE_CLIENT -nm -q "
+ drop table if exists data_fsync_pe;
+
+ create table data_fsync_pe (key Int) engine=MergeTree()
+ order by key
+ settings
+ min_rows_for_wide_part=2,
+ fsync_after_insert=1,
+ fsync_part_directory=1;
+"
+
+ret=1
+# Retry in case fsync/fdatasync was too fast
+# (FileSyncElapsedMicroseconds/DirectorySyncElapsedMicroseconds was 0)
+for i in {1..100}; do
+ query_id="insert-$i-$CLICKHOUSE_DATABASE"
+
+ $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)"
+
+ read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$(
+ $CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
+ system flush logs;
+
+ select
+ ProfileEvents['FileSync'],
+ ProfileEvents['FileOpen'],
+ ProfileEvents['DirectorySync'],
+ ProfileEvents['FileSyncElapsedMicroseconds']>0,
+ ProfileEvents['DirectorySyncElapsedMicroseconds']>0
+ from system.query_log
+ where
+ event_date >= yesterday() and
+ current_database = currentDatabase() and
+ query_id = {query_id:String} and
+ type = 'QueryFinish';
+ ")"
+
+ # Non-retriable errors
+ if [[ $FileSync -ne 7 ]]; then
+ exit 2
+ fi
+ # Check that all files were synced
+ if [[ $FileSync -ne $FileOpen ]]; then
+ exit 3
+ fi
+ if [[ $DirectorySync -ne 2 ]]; then
+ exit 4
+ fi
+
+ # Retriable errors
+ if [[ $FileSyncElapsedMicroseconds -eq 0 ]]; then
+ continue
+ fi
+ if [[ $DirectorySyncElapsedMicroseconds -eq 0 ]]; then
+ continue
+ fi
+
+ # Everything is OK
+ ret=0
+ break
+done
+
+$CLICKHOUSE_CLIENT -q "drop table data_fsync_pe"
+
+exit $ret
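
Note: the profile events this script checks can also be inspected ad hoc after any insert. A sketch, assuming the query log is enabled and flushed first with SYSTEM FLUSH LOGS:

    SELECT
        ProfileEvents['FileSync'] AS file_syncs,
        ProfileEvents['DirectorySync'] AS directory_syncs,
        ProfileEvents['FileSyncElapsedMicroseconds'] > 0 AS file_sync_took_time
    FROM system.query_log
    WHERE current_database = currentDatabase() AND type = 'QueryFinish'
    ORDER BY event_time DESC
    LIMIT 1;
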
diff --git a/tests/queries/0_stateless/02374_in_tuple_index.reference b/tests/queries/0_stateless/02374_in_tuple_index.reference
new file mode 100644
index 00000000000..51993f072d5
--- /dev/null
+++ b/tests/queries/0_stateless/02374_in_tuple_index.reference
@@ -0,0 +1,2 @@
+2
+2
diff --git a/tests/queries/0_stateless/02374_in_tuple_index.sql b/tests/queries/0_stateless/02374_in_tuple_index.sql
new file mode 100644
index 00000000000..7f9b7b5470e
--- /dev/null
+++ b/tests/queries/0_stateless/02374_in_tuple_index.sql
@@ -0,0 +1,23 @@
+DROP TABLE IF EXISTS t_in_tuple_index;
+
+CREATE TABLE t_in_tuple_index
+(
+ `ID` String,
+ `USER_ID` String,
+ `PLATFORM` LowCardinality(String)
+)
+ENGINE = MergeTree()
+ORDER BY (PLATFORM, USER_ID, ID)
+SETTINGS index_granularity = 2048;
+
+INSERT INTO t_in_tuple_index VALUES ('1', 33, 'insta'), ('2', 33, 'insta');
+
+SELECT count()
+FROM t_in_tuple_index
+WHERE (PLATFORM, USER_ID) IN (('insta', '33'));
+
+SELECT count()
+FROM t_in_tuple_index
+WHERE (PLATFORM, USER_ID) IN (('insta', '33'), ('insta', '22'));
+
+DROP TABLE IF EXISTS t_in_tuple_index;
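
Note: this test pins down index analysis for a tuple IN over a prefix of the primary key, where the literal tuple's element types need casting against the key columns (LowCardinality(String) and String). One way to confirm the primary-key index is actually consulted, assuming a server version whose EXPLAIN supports it (output format varies):

    EXPLAIN indexes = 1
    SELECT count()
    FROM t_in_tuple_index
    WHERE (PLATFORM, USER_ID) IN (('insta', '33'));
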