Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-21 23:21:59 +00:00

Commit fd8ad12e6b: Merge branch 'master' into jepsen-label

.github/workflows/master.yml (vendored, 14 changes)
@@ -151,8 +151,8 @@ jobs:
           # shellcheck disable=SC2046
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr "$TEMP_PATH"
-  SplitBuildSmokeTest:
-    needs: [BuilderDebSplitted]
+  SharedBuildSmokeTest:
+    needs: [BuilderDebShared]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Set envs
@@ -171,7 +171,7 @@ jobs:
         uses: actions/download-artifact@v2
         with:
           path: ${{ env.REPORTS_PATH }}
-      - name: Split build check
+      - name: Shared build check
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
@@ -598,7 +598,7 @@ jobs:
   ##########################################################################################
   ##################################### SPECIAL BUILDS #####################################
   ##########################################################################################
-  BuilderDebSplitted:
+  BuilderDebShared:
     needs: [DockerHubPush]
     runs-on: [self-hosted, builder]
     steps:
@@ -609,7 +609,7 @@ jobs:
           IMAGES_PATH=${{runner.temp}}/images_path
           REPO_COPY=${{runner.temp}}/build_check/ClickHouse
           CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=binary_splitted
+          BUILD_NAME=binary_shared
           EOF
       - name: Download changed images
         uses: actions/download-artifact@v2
@@ -1012,7 +1012,7 @@ jobs:
       # - BuilderBinGCC
       - BuilderBinPPC64
       - BuilderBinClangTidy
-      - BuilderDebSplitted
+      - BuilderDebShared
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Set envs
@@ -3153,7 +3153,7 @@ jobs:
       - UnitTestsMsan
       - UnitTestsUBsan
       - UnitTestsReleaseClang
-      - SplitBuildSmokeTest
+      - SharedBuildSmokeTest
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Clear repository
.github/workflows/pull_request.yml (vendored, 14 changes)

@@ -216,8 +216,8 @@ jobs:
           # shellcheck disable=SC2046
           docker rm -f $(docker ps -a -q) ||:
           sudo rm -fr "$TEMP_PATH"
-  SplitBuildSmokeTest:
-    needs: [BuilderDebSplitted]
+  SharedBuildSmokeTest:
+    needs: [BuilderDebShared]
     runs-on: [self-hosted, style-checker]
     steps:
       - name: Set envs
@@ -236,7 +236,7 @@ jobs:
         uses: actions/download-artifact@v2
         with:
           path: ${{ env.REPORTS_PATH }}
-      - name: Split build check
+      - name: Shared build check
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
@@ -620,7 +620,7 @@ jobs:
   ##########################################################################################
   ##################################### SPECIAL BUILDS #####################################
   ##########################################################################################
-  BuilderDebSplitted:
+  BuilderDebShared:
     needs: [DockerHubPush, FastTest, StyleCheck]
     runs-on: [self-hosted, builder]
     steps:
@@ -631,7 +631,7 @@ jobs:
           IMAGES_PATH=${{runner.temp}}/images_path
           REPO_COPY=${{runner.temp}}/build_check/ClickHouse
           CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=binary_splitted
+          BUILD_NAME=binary_shared
           EOF
       - name: Download changed images
         uses: actions/download-artifact@v2
@@ -1024,7 +1024,7 @@ jobs:
       # - BuilderBinGCC
       - BuilderBinPPC64
       - BuilderBinClangTidy
-      - BuilderDebSplitted
+      - BuilderDebShared
     runs-on: [self-hosted, style-checker]
     if: ${{ success() || failure() }}
     steps:
@@ -3420,7 +3420,7 @@ jobs:
       - UnitTestsMsan
       - UnitTestsUBsan
      - UnitTestsReleaseClang
-      - SplitBuildSmokeTest
+      - SharedBuildSmokeTest
       - CompatibilityCheck
       - IntegrationTestsFlakyCheck
     runs-on: [self-hosted, style-checker]
@@ -15,4 +15,4 @@ ClickHouse® is an open-source column-oriented database management system that a
 * [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.

 ## Upcoming events
-* **v22.8 Release Webinar** Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
+* [**v22.8 Release Webinar**](https://clickhouse.com/company/events/v22-8-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
contrib/arrow (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 3e03c6de41a86df2fc54a61e9be1abaefeff6b0e
+Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
@@ -100,12 +100,12 @@ def run_docker_image_with_env(
     subprocess.check_call(cmd, shell=True)


-def is_release_build(build_type, package_type, sanitizer, split_binary):
+def is_release_build(build_type, package_type, sanitizer, shared_libraries):
     return (
         build_type == ""
         and package_type == "deb"
         and sanitizer == ""
-        and not split_binary
+        and not shared_libraries
     )


@@ -116,7 +116,7 @@ def parse_env_variables(
     package_type,
     cache,
     distcc_hosts,
-    split_binary,
+    shared_libraries,
     clang_tidy,
     version,
     author,
@@ -202,7 +202,7 @@ def parse_env_variables(
     cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
     cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
     cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
-    if is_release_build(build_type, package_type, sanitizer, split_binary):
+    if is_release_build(build_type, package_type, sanitizer, shared_libraries):
         cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
         result.append("WITH_PERFORMANCE=1")
         if is_cross_arm:
@@ -215,11 +215,11 @@ def parse_env_variables(
     cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
     cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")

-    # Create combined output archive for split build and for performance tests.
+    # Create combined output archive for shared library build and for performance tests.
     if package_type == "coverity":
         result.append("COMBINED_OUTPUT=coverity")
         result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
-    elif split_binary:
+    elif shared_libraries:
         result.append("COMBINED_OUTPUT=shared_build")

     if sanitizer:
@@ -264,13 +264,13 @@ def parse_env_variables(
         result.append("BINARY_OUTPUT=tests")
         cmake_flags.append("-DENABLE_TESTS=1")

-    if split_binary:
+    if shared_libraries:
         cmake_flags.append(
             "-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1"
         )
         # We can't always build utils because it requires too much space, but
-        # we have to build them at least in some way in CI. The split build is
-        # probably the least heavy disk-wise.
+        # we have to build them at least in some way in CI. The shared library
+        # build is probably the least heavy disk-wise.
         cmake_flags.append("-DENABLE_UTILS=1")
         # utils are not included into clickhouse-bundle, so build everything
         build_target = "all"
@@ -351,7 +351,7 @@ if __name__ == "__main__":
         default="",
     )

-    parser.add_argument("--split-binary", action="store_true")
+    parser.add_argument("--shared-libraries", action="store_true")
     parser.add_argument("--clang-tidy", action="store_true")
     parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
     parser.add_argument(
@@ -404,7 +404,7 @@ if __name__ == "__main__":
         args.package_type,
         args.cache,
         args.distcc_hosts,
-        args.split_binary,
+        args.shared_libraries,
         args.clang_tidy,
         args.version,
         args.author,
@@ -267,6 +267,12 @@ The system will prepare ClickHouse binary builds for your pull request individua

 Most probably some of the builds will fail at first. This is because we check builds both with gcc and with clang, with almost all existing warnings (always with the `-Werror` flag) enabled for clang. On that same page, you can find all of the build logs so that you do not have to build ClickHouse in all of the possible ways.

 ## Browse ClickHouse Source Code {#browse-clickhouse-source-code}

 You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation, semantic highlighting, search and indexing. The code snapshot is updated daily.

 Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.

+## Faster builds for development: Split build configuration {#split-build}
+
+ClickHouse is normally statically linked into a single static `clickhouse` binary with minimal dependencies. This is convenient for distribution, but it means that for every change the entire binary needs to be re-linked, which is slow and inconvenient for development. As an alternative, you can instead build dynamically linked shared libraries, allowing for faster incremental builds. To use it, add the following flags to your `cmake` invocation:
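The hunk is cut off before the flags themselves. Based on the `--shared-libraries` branch of the CI packager change above, the invocation is presumably along these lines (a sketch under that assumption, not the verbatim text of the added docs):

```bash
# Assumed from the packager's shared-libraries path: link ClickHouse
# against shared libraries instead of producing one static binary.
cmake .. -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1
```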
@@ -389,12 +389,6 @@ SETTINGS mutations_sync = 1;

 Let's run the same 3 queries.

-[Enable](../../operations/settings/settings.md#allow-experimental-projection-optimization) projections for selects:
-
-```sql
-SET allow_experimental_projection_optimization = 1;
-```
-
 ### Query 1. Average Price Per Year {#average-price-projections}

 Query:
@@ -438,6 +438,18 @@ For more information, see the section “[Configuration files](../../operations/
 <include_from>/etc/metrica.xml</include_from>
 ```

+## interserver_listen_host {#interserver-listen-host}
+
+Restriction on hosts that can exchange data between ClickHouse servers.
+The default value equals the `listen_host` setting.
+
+Examples:
+
+``` xml
+<interserver_listen_host>::ffff:a00:1</interserver_listen_host>
+<interserver_listen_host>10.0.0.1</interserver_listen_host>
+```
+
 ## interserver_http_port {#interserver-http-port}

 Port for exchanging data between ClickHouse servers.
@@ -970,7 +982,7 @@ Default value: 2.
 **Example**

 ```xml
-<background_merges_mutations_concurrency_ratio>3</background_pbackground_merges_mutations_concurrency_ratio>
+<background_merges_mutations_concurrency_ratio>3</background_merges_mutations_concurrency_ratio>
 ```

 ## background_move_pool_size {#background_move_pool_size}
@@ -285,3 +285,9 @@ A pull request can be created even if the work on the task
 The system will prepare ClickHouse builds specifically for your pull request. To get them, click the «Details» link next to the «Clickhouse build check» entry. There you will find direct links to the built .deb packages of ClickHouse, which, if you wish, you can even install on your production servers (if you are not afraid).

 Most likely, some of the builds will not succeed on the first try. This is because we check builds with both gcc and clang, and building with clang enables almost every warning in existence (always with the `-Werror` flag). On the same page you can find the build logs: you do not have to build ClickHouse in every possible way yourself.

 ## Navigating the ClickHouse code {#navigatsiia-po-kodu-clickhouse}

 For online code navigation, **Woboq** is available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides convenient movement between source files, semantic highlighting, hints, indexing and search. The code snapshot is updated daily.

 You can also browse the sources on [GitHub](https://github.com/ClickHouse/ClickHouse).
@@ -389,12 +389,6 @@ SETTINGS mutations_sync = 1;

 Let's run the same 3 queries.

-[Enable](../../operations/settings/settings.md#allow-experimental-projection-optimization) projection support:
-
-```sql
-SET allow_experimental_projection_optimization = 1;
-```
-
 ### Query 1. Average price per year {#average-price-projections}

 Query:
@@ -647,4 +641,3 @@ no projection: 100 rows in set. Elapsed: 0.069 sec. Processed 26.32 million rows
 ### Online Playground {#playground}

 This dataset is available in the [Online Playground](https://gh-api.clickhouse.tech/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==).
-
@@ -407,6 +407,18 @@ ClickHouse checks the conditions for `min_part_size` and `min_part_size_ratio`
 <include_from>/etc/metrica.xml</include_from>
 ```

+## interserver_listen_host {#interserver-listen-host}
+
+The restriction on hosts for exchange between ClickHouse servers.
+The default value equals the value of the `listen_host` setting.
+
+Examples:
+
+``` xml
+<interserver_listen_host>::ffff:a00:1</interserver_listen_host>
+<interserver_listen_host>10.0.0.1</interserver_listen_host>
+```
+
 ## interserver_http_port {#interserver-http-port}

 Port for exchanging data between ClickHouse servers.
@@ -264,3 +264,9 @@ Once a ClickHouse member puts the «can be tested» label on your pull request
 The system will prepare ClickHouse binary builds for your pull request individually. To retrieve them, click the «Details» link next to the «ClickHouse build check» entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse, which you can even deploy to your production servers (if you are not afraid).

 Some of the builds will most likely fail the first time. This is because we check builds with both gcc and clang, and almost all existing warnings are enabled for clang (always with the `-Werror` flag). On the same page you can find all of the build logs, so you do not have to build ClickHouse in every possible way.

 ## Browse the ClickHouse source code {#browse-clickhouse-source-code}

 You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.

 You can also browse the source code on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
@@ -46,6 +46,7 @@
 #include <boost/algorithm/string.hpp>
 #include <boost/container/flat_map.hpp>
 #include <Common/TerminalSize.h>
+#include <bit>


 static const char * documentation = R"(
@@ -186,7 +187,7 @@ static UInt64 transform(UInt64 x, UInt64 seed)
     if (x == 2 || x == 3)
         return x ^ (seed & 1);

-    size_t num_leading_zeros = __builtin_clzll(x);
+    size_t num_leading_zeros = std::countl_zero(x);

     return feistelNetwork(x, 64 - num_leading_zeros - 1, seed);
 }
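This hunk is the first of many in this merge that replace GCC/Clang builtins with their C++20 `<bit>` equivalents. A minimal standalone sketch of the mapping (plain standard C++, not ClickHouse code):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t x = 0x00f0; // 0b11110000
    assert(std::countl_zero(x) == 56);  // == __builtin_clzll(x) for nonzero x
    assert(std::countr_zero(x) == 4);   // == __builtin_ctzll(x) for nonzero x
    assert(std::popcount(x) == 4);      // == __builtin_popcountll(x)
    return 0;
}
```

Unlike the builtins, which are undefined for a zero argument, the `<bit>` functions are fully defined (the zero-count functions return the type's bit width for zero), so the surrounding asserts and guards remain the only correctness requirements.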
@@ -367,7 +367,7 @@ Poco::Net::SocketAddress Server::socketBindListen(
     return address;
 }

-std::vector<std::string> getListenHosts(const Poco::Util::AbstractConfiguration & config)
+Strings getListenHosts(const Poco::Util::AbstractConfiguration & config)
 {
     auto listen_hosts = DB::getMultipleValuesFromConfig(config, "", "listen_host");
     if (listen_hosts.empty())
@@ -378,6 +378,16 @@ std::vector<std::string> getListenHosts(const Poco::Util::AbstractConfiguration
     return listen_hosts;
 }

+Strings getInterserverListenHosts(const Poco::Util::AbstractConfiguration & config)
+{
+    auto interserver_listen_hosts = DB::getMultipleValuesFromConfig(config, "", "interserver_listen_host");
+    if (!interserver_listen_hosts.empty())
+        return interserver_listen_hosts;
+
+    /// Use more general restriction in case of emptiness
+    return getListenHosts(config);
+}
+
 bool getListenTry(const Poco::Util::AbstractConfiguration & config)
 {
     bool listen_try = config.getBool("listen_try", false);
@@ -1234,6 +1244,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
         /* already_loaded = */ false);  /// Reload it right now (initial loading)

     const auto listen_hosts = getListenHosts(config());
+    const auto interserver_listen_hosts = getInterserverListenHosts(config());
     const auto listen_try = getListenTry(config());

     if (config().has("keeper_server"))
@@ -1629,7 +1640,7 @@ int Server::main(const std::vector<std::string> & /*args*/)

         {
             std::lock_guard lock(servers_lock);
-            createServers(config(), listen_hosts, listen_try, server_pool, async_metrics, servers);
+            createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
             if (servers.empty())
                 throw Exception(
                     "No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
@@ -1811,7 +1822,8 @@ int Server::main(const std::vector<std::string> & /*args*/)

 void Server::createServers(
     Poco::Util::AbstractConfiguration & config,
-    const std::vector<std::string> & listen_hosts,
+    const Strings & listen_hosts,
+    const Strings & interserver_listen_hosts,
     bool listen_try,
     Poco::ThreadPool & server_pool,
     AsynchronousMetrics & async_metrics,
@@ -1929,51 +1941,6 @@ void Server::createServers(
 #endif
         });

-        /// Interserver IO HTTP
-        port_name = "interserver_http_port";
-        createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
-        {
-            Poco::Net::ServerSocket socket;
-            auto address = socketBindListen(config, socket, listen_host, port);
-            socket.setReceiveTimeout(settings.http_receive_timeout);
-            socket.setSendTimeout(settings.http_send_timeout);
-            return ProtocolServerAdapter(
-                listen_host,
-                port_name,
-                "replica communication (interserver): http://" + address.toString(),
-                std::make_unique<HTTPServer>(
-                    context(),
-                    createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"),
-                    server_pool,
-                    socket,
-                    http_params));
-        });
-
-        port_name = "interserver_https_port";
-        createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
-        {
-#if USE_SSL
-            Poco::Net::SecureServerSocket socket;
-            auto address = socketBindListen(config, socket, listen_host, port, /* secure = */ true);
-            socket.setReceiveTimeout(settings.http_receive_timeout);
-            socket.setSendTimeout(settings.http_send_timeout);
-            return ProtocolServerAdapter(
-                listen_host,
-                port_name,
-                "secure replica communication (interserver): https://" + address.toString(),
-                std::make_unique<HTTPServer>(
-                    context(),
-                    createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"),
-                    server_pool,
-                    socket,
-                    http_params));
-#else
-            UNUSED(port);
-            throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
-                            ErrorCodes::SUPPORT_IS_DISABLED};
-#endif
-        });
-
         port_name = "mysql_port";
         createServer(config, listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
         {
@@ -2032,6 +1999,55 @@ void Server::createServers(
         });
     }

+    /// Now iterate over interserver_listen_hosts
+    for (const auto & interserver_listen_host : interserver_listen_hosts)
+    {
+        /// Interserver IO HTTP
+        const char * port_name = "interserver_http_port";
+        createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
+        {
+            Poco::Net::ServerSocket socket;
+            auto address = socketBindListen(config, socket, interserver_listen_host, port);
+            socket.setReceiveTimeout(settings.http_receive_timeout);
+            socket.setSendTimeout(settings.http_send_timeout);
+            return ProtocolServerAdapter(
+                interserver_listen_host,
+                port_name,
+                "replica communication (interserver): http://" + address.toString(),
+                std::make_unique<HTTPServer>(
+                    context(),
+                    createHandlerFactory(*this, async_metrics, "InterserverIOHTTPHandler-factory"),
+                    server_pool,
+                    socket,
+                    http_params));
+        });
+
+        port_name = "interserver_https_port";
+        createServer(config, interserver_listen_host, port_name, listen_try, start_servers, servers, [&](UInt16 port) -> ProtocolServerAdapter
+        {
+#if USE_SSL
+            Poco::Net::SecureServerSocket socket;
+            auto address = socketBindListen(config, socket, interserver_listen_host, port, /* secure = */ true);
+            socket.setReceiveTimeout(settings.http_receive_timeout);
+            socket.setSendTimeout(settings.http_send_timeout);
+            return ProtocolServerAdapter(
+                interserver_listen_host,
+                port_name,
+                "secure replica communication (interserver): https://" + address.toString(),
+                std::make_unique<HTTPServer>(
+                    context(),
+                    createHandlerFactory(*this, async_metrics, "InterserverIOHTTPSHandler-factory"),
+                    server_pool,
+                    socket,
+                    http_params));
+#else
+            UNUSED(port);
+            throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
+                            ErrorCodes::SUPPORT_IS_DISABLED};
+#endif
+        });
+    }
+
 }

 void Server::updateServers(
@@ -2043,6 +2059,7 @@ void Server::updateServers(
     Poco::Logger * log = &logger();

     const auto listen_hosts = getListenHosts(config);
+    const auto interserver_listen_hosts = getInterserverListenHosts(config);
     const auto listen_try = getListenTry(config);

     /// Remove servers once all their connections are closed
@@ -2075,7 +2092,7 @@ void Server::updateServers(
         }
     }

-    createServers(config, listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);
+    createServers(config, listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers, /* start_servers= */ true);

     std::erase_if(servers, std::bind_front(check_server, ""));
 }
@@ -86,7 +86,8 @@ private:

     void createServers(
         Poco::Util::AbstractConfiguration & config,
-        const std::vector<std::string> & listen_hosts,
+        const Strings & listen_hosts,
+        const Strings & interserver_listen_hosts,
         bool listen_try,
         Poco::ThreadPool & server_pool,
         AsynchronousMetrics & async_metrics,
@@ -188,6 +188,10 @@
     <listen_host>127.0.0.1</listen_host>
     -->

+    <!-- <interserver_listen_host>::</interserver_listen_host> -->
+    <!-- Listen host for communication between replicas. Used for data exchange -->
+    <!-- Default values - equal to listen_host -->
+
     <!-- Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen. -->
     <!-- <listen_try>0</listen_try> -->
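Putting the new option together with `listen_host`, a server might be configured roughly as follows (the addresses are placeholders; the shape follows the XML examples in the settings docs above):

```xml
<clickhouse>
    <!-- Accept client connections on every interface... -->
    <listen_host>::</listen_host>
    <!-- ...but keep replica-to-replica (interserver) traffic on one private address. -->
    <interserver_listen_host>10.0.0.1</interserver_listen_host>
</clickhouse>
```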
@@ -583,9 +583,14 @@ try
         if (has_vertical_output_suffix)
             current_format = "Vertical";

-        /// It is not clear how to write progress intermixed with data with parallel formatting.
+        bool logs_into_stdout = server_logs_file == "-";
+        bool extras_into_stdout = need_render_progress || logs_into_stdout;
+        bool select_only_into_file = select_into_file && !select_into_file_and_stdout;
+
+        /// It is not clear how to write progress and logs
+        /// intermixed with data with parallel formatting.
         /// It may increase code complexity significantly.
-        if (!need_render_progress || (select_into_file && !select_into_file_and_stdout))
+        if (!extras_into_stdout || select_only_into_file)
             output_format = global_context->getOutputFormatParallelIfPossible(
                 current_format, out_file_buf ? *out_file_buf : *out_buf, block);
         else
@@ -298,7 +298,7 @@ ColumnPtr ColumnDecimal<T>::filter(const IColumn::Filter & filt, ssize_t result_
         {
             while (mask)
             {
-                size_t index = __builtin_ctzll(mask);
+                size_t index = std::countr_zero(mask);
                 res_data.push_back(data_pos[index]);
 #ifdef __BMI__
                 mask = _blsr_u64(mask);
@@ -240,7 +240,7 @@ ColumnPtr ColumnFixedString::filter(const IColumn::Filter & filt, ssize_t result
         size_t res_chars_size = res->chars.size();
         while (mask)
         {
-            size_t index = __builtin_ctzll(mask);
+            size_t index = std::countr_zero(mask);
             res->chars.resize(res_chars_size + n);
             memcpySmallAllowReadWriteOverflow15(&res->chars[res_chars_size], data_pos + index * n, n);
             res_chars_size += n;
@@ -508,7 +508,7 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
         {
             while (mask)
             {
-                size_t index = __builtin_ctzll(mask);
+                size_t index = std::countr_zero(mask);
                 res_data.push_back(data_pos[index]);
 #ifdef __BMI__
                 mask = _blsr_u64(mask);
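The filter loops in these three hunks all use the same idiom: iterate over the set bits of a 64-bit comparison mask. A self-contained sketch of that idiom with the new `<bit>` calls (illustrative values, not ClickHouse code):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    // Visit the index of every set bit, lowest first.
    uint64_t mask = 0b10110; // bits 1, 2 and 4 are set
    std::vector<size_t> indexes;
    while (mask)
    {
        size_t index = std::countr_zero(mask); // position of the lowest set bit
        indexes.push_back(index);
        mask &= mask - 1; // clear the lowest set bit (what _blsr_u64 does on BMI CPUs)
    }
    assert((indexes == std::vector<size_t>{1, 2, 4}));
    return 0;
}
```

`mask &= mask - 1` is the portable fallback for the `_blsr_u64` intrinsic seen in the `#ifdef __BMI__` branches.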
@@ -2,13 +2,14 @@
 #include <Columns/ColumnVector.h>
 #include <Common/typeid_cast.h>
 #include <Common/HashTable/HashSet.h>
+#include <bit>
 #include "ColumnsCommon.h"


 namespace DB
 {

-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
 /// Transform 64-byte mask to 64-bit mask.
 static UInt64 toBits64(const Int8 * bytes64)
 {
@@ -41,11 +42,11 @@ size_t countBytesInFilter(const UInt8 * filt, size_t start, size_t end)

     const Int8 * end_pos = pos + (end - start);

-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
     const Int8 * end_pos64 = pos + (end - start) / 64 * 64;

     for (; pos < end_pos64; pos += 64)
-        count += __builtin_popcountll(toBits64(pos));
+        count += std::popcount(toBits64(pos));

     /// TODO Add duff device for tail?
 #endif
@@ -74,11 +75,11 @@ size_t countBytesInFilterWithNull(const IColumn::Filter & filt, const UInt8 * nu
     const Int8 * pos2 = reinterpret_cast<const Int8 *>(null_map) + start;
     const Int8 * end_pos = pos + (end - start);

-#if defined(__SSE2__) && defined(__POPCNT__)
+#if defined(__SSE2__)
     const Int8 * end_pos64 = pos + (end - start) / 64 * 64;

     for (; pos < end_pos64; pos += 64, pos2 += 64)
-        count += __builtin_popcountll(toBits64(pos) & ~toBits64(pos2));
+        count += std::popcount(toBits64(pos) & ~toBits64(pos2));

     /// TODO Add duff device for tail?
 #endif
@@ -259,7 +260,7 @@ namespace
     {
         while (mask)
         {
-            size_t index = __builtin_ctzll(mask);
+            size_t index = std::countr_zero(mask);
             copy_array(offsets_pos + index);
 #ifdef __BMI__
             mask = _blsr_u64(mask);
@@ -36,7 +36,7 @@ inline UInt64 bytes64MaskToBits64Mask(const UInt8 * bytes64)
         _mm256_loadu_si256(reinterpret_cast<const __m256i *>(bytes64)), zero32))) & 0xffffffff)
         | (static_cast<UInt64>(_mm256_movemask_epi8(_mm256_cmpeq_epi8(
         _mm256_loadu_si256(reinterpret_cast<const __m256i *>(bytes64+32)), zero32))) << 32);
-#elif defined(__SSE2__) && defined(__POPCNT__)
+#elif defined(__SSE2__)
     static const __m128i zero16 = _mm_setzero_si128();
     UInt64 res =
         (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
@@ -3,6 +3,7 @@
 #include <Common/HashTable/HashMap.h>
 #include <Common/HashTable/HashTable.h>

+#include <bit>
 #include <new>
 #include <variant>

@@ -21,17 +22,17 @@ struct StringKey24
 inline StringRef ALWAYS_INLINE toStringRef(const StringKey8 & n)
 {
     assert(n != 0);
-    return {reinterpret_cast<const char *>(&n), 8ul - (__builtin_clzll(n) >> 3)};
+    return {reinterpret_cast<const char *>(&n), 8ul - (std::countl_zero(n) >> 3)};
 }
 inline StringRef ALWAYS_INLINE toStringRef(const StringKey16 & n)
 {
     assert(n.items[1] != 0);
-    return {reinterpret_cast<const char *>(&n), 16ul - (__builtin_clzll(n.items[1]) >> 3)};
+    return {reinterpret_cast<const char *>(&n), 16ul - (std::countl_zero(n.items[1]) >> 3)};
 }
 inline StringRef ALWAYS_INLINE toStringRef(const StringKey24 & n)
 {
     assert(n.c != 0);
-    return {reinterpret_cast<const char *>(&n), 24ul - (__builtin_clzll(n.c) >> 3)};
+    return {reinterpret_cast<const char *>(&n), 24ul - (std::countl_zero(n.c) >> 3)};
 }

 struct StringHashTableHash
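The `toStringRef` overloads recover a string's length from the zero high bytes of its fixed-size key. A worked example of that arithmetic (assumes the little-endian byte layout the hash table relies on; standalone, not ClickHouse code):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // "ab" packed into the low bytes of a StringKey8-style UInt64.
    uint64_t n = 0x6261; // 'a' = 0x61, 'b' = 0x62
    // Every unused high byte contributes 8 leading zero bits, so
    // length = 8 - (number of zero high bytes).
    size_t len = 8ul - (std::countl_zero(n) >> 3);
    assert(len == 2);
    return 0;
}
```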
@@ -11,6 +11,7 @@
 #include <IO/WriteHelpers.h>
 #include <Core/Defines.h>

+#include <bit>
 #include <cmath>
 #include <cstring>

@@ -205,7 +206,7 @@ struct TrailingZerosCounter<UInt32>
 {
     static int apply(UInt32 val)
     {
-        return __builtin_ctz(val);
+        return std::countr_zero(val);
     }
 };

@@ -214,7 +215,7 @@ struct TrailingZerosCounter<UInt64>
 {
     static int apply(UInt64 val)
     {
-        return __builtin_ctzll(val);
+        return std::countr_zero(val);
     }
 };
@@ -5,6 +5,7 @@
 #include <Common/formatIPv6.h>

 #include <cstring>
+#include <bit>


 namespace DB
@@ -89,7 +90,7 @@ bool matchIPv6Subnet(const uint8_t * addr, const uint8_t * cidr_addr, UInt8 pref

     if (mask)
     {
-        auto offset = __builtin_ctz(mask);
+        auto offset = std::countr_zero(mask);

         if (prefix / 8 != offset)
             return prefix / 8 < offset;
@@ -25,6 +25,10 @@
     M(WriteBufferFromFileDescriptorWrite, "Number of writes (write/pwrite) to a file descriptor. Does not include sockets.") \
     M(WriteBufferFromFileDescriptorWriteFailed, "Number of times the write (write/pwrite) to a file descriptor have failed.") \
     M(WriteBufferFromFileDescriptorWriteBytes, "Number of bytes written to file descriptors. If the file is compressed, this will show compressed data size.") \
+    M(FileSync, "Number of times the F_FULLFSYNC/fsync/fdatasync function was called for files.") \
+    M(DirectorySync, "Number of times the F_FULLFSYNC/fsync/fdatasync function was called for directories.") \
+    M(FileSyncElapsedMicroseconds, "Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for files.") \
+    M(DirectorySyncElapsedMicroseconds, "Total time spent waiting for F_FULLFSYNC/fsync/fdatasync syscall for directories.") \
     M(ReadCompressedBytes, "Number of bytes (the number of bytes before decompression) read from compressed sources (files, network).") \
     M(CompressedReadBufferBlocks, "Number of compressed blocks (the blocks of data that are compressed independent of each other) read from compressed sources (files, network).") \
     M(CompressedReadBufferBytes, "Number of uncompressed bytes (the number of bytes after decompression) read from compressed sources (files, network).") \
@@ -78,7 +78,7 @@ private:
     constexpr uint64_t nextAlphaSize(uint64_t x)
     {
         constexpr uint64_t alpha_map_elements_per_counter = 6;
-        return 1ULL << (sizeof(uint64_t) * 8 - __builtin_clzll(x * alpha_map_elements_per_counter));
+        return 1ULL << (sizeof(uint64_t) * 8 - std::countl_zero(x * alpha_map_elements_per_counter));
     }

 public:
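For a concrete feel of the changed expression: `1 << (64 - countl_zero(v))` is the smallest power of two strictly greater than `v`. A worked check (standalone sketch mirroring the function above, illustrative input):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // nextAlphaSize(10): 10 counters * 6 elements = 60;
    // countl_zero(60) == 58, so the shift is 64 - 58 = 6 and the
    // result is 1 << 6 == 64, the next power of two above 60.
    uint64_t v = 10 * 6;
    uint64_t size = 1ULL << (sizeof(uint64_t) * 8 - std::countl_zero(v));
    assert(size == 64);
    return 0;
}
```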
@@ -2,6 +2,7 @@
 #include <Common/StringUtils/StringUtils.h>

 #include <widechar_width.h>
+#include <bit>


 namespace DB
@@ -124,7 +125,7 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l

             if (non_regular_width_mask)
             {
-                auto num_regular_chars = __builtin_ctz(non_regular_width_mask);
+                auto num_regular_chars = std::countr_zero(non_regular_width_mask);
                 width += num_regular_chars;
                 i += num_regular_chars;
                 break;
@@ -83,7 +83,7 @@ inline size_t countCodePoints(const UInt8 * data, size_t size)
     const auto threshold = vdupq_n_s8(0xBF);

     for (; data < src_end_sse; data += bytes_sse)
-        res += __builtin_popcountll(get_nibble_mask(vcgtq_s8(vld1q_s8(reinterpret_cast<const int8_t *>(data)), threshold)));
+        res += std::popcount(get_nibble_mask(vcgtq_s8(vld1q_s8(reinterpret_cast<const int8_t *>(data)), threshold)));
     res >>= 2;
 #endif
@@ -1,5 +1,6 @@
 #include <iostream>
 #include <string>
+#include <bit>

 #include <fmt/format.h>

@@ -561,7 +562,7 @@ int main(int argc, char ** argv)
     /// Fill source data
     for (size_t i = 0; i < size; ++i)
     {
-        keys[i] = __builtin_ctz(i + 1);    /// Make keys to have just slightly more realistic distribution.
+        keys[i] = std::countr_zero(i + 1); /// Make keys to have just slightly more realistic distribution.
         values[i] = 1234.5; /// The distribution of values does not affect execution speed.
     }
@@ -1,6 +1,7 @@
 #pragma once

 #include <algorithm>
+#include <bit>
 #include <cstdint>

 #include <Core/Defines.h>
@@ -50,7 +51,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= min_size)
                 break;
@@ -82,7 +83,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= min_size)
                 break;
@@ -123,7 +124,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= max_size)
                 return 0;
@@ -150,7 +151,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= size)
                 return 0;
@@ -180,7 +181,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);
             return offset >= a_size;
         }
     }
@@ -203,7 +204,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);
             return detail::cmp(a[offset], b[offset]);
         }
     }
@@ -222,7 +223,7 @@ inline int memcmp16(const Char * a, const Char * b)

     if (mask)
     {
-        auto offset = __builtin_ctz(mask);
+        auto offset = std::countr_zero(mask);
         return detail::cmp(a[offset], b[offset]);
     }

@@ -252,7 +253,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);
             return offset >= size;
         }
     }
@@ -285,7 +286,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= min_size)
                 break;
@@ -317,7 +318,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= min_size)
                 break;
@@ -359,7 +360,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= max_size)
                 return 0;
@@ -386,7 +387,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);

             if (offset >= size)
                 return 0;
@@ -416,7 +417,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);
             return offset >= a_size;
         }
     }
@@ -439,7 +440,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)

         if (mask)
         {
-            offset += __builtin_ctz(mask);
+            offset += std::countr_zero(mask);
             return detail::cmp(a[offset], b[offset]);
         }
     }
@@ -459,7 +460,7 @@ inline int memcmp16(const Char * a, const Char * b)

     if (mask)
     {
-        auto offset = __builtin_ctz(mask);
+        auto offset = std::countr_zero(mask);
         return detail::cmp(a[offset], b[offset]);
     }

@@ -490,7 +491,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)

     if (mask)
     {
-        offset += __builtin_ctz(mask);
+        offset += std::countr_zero(mask);
         return offset >= size;
     }
     }
@@ -523,7 +524,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;

             if (offset >= min_size)
                 break;
@@ -548,7 +549,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;

             if (offset >= min_size)
                 break;
@@ -589,7 +590,7 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;

             if (offset >= max_size)
                 return 0;
@@ -611,7 +612,7 @@ inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t siz

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;

             if (offset >= size)
                 return 0;
@@ -637,7 +638,7 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;
             return offset >= a_size;
         }
     }
@@ -656,7 +657,7 @@ inline int memcmpSmallMultipleOf16(const Char * a, const Char * b, size_t size)

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;
             return detail::cmp(a[offset], b[offset]);
         }
     }
@@ -672,7 +673,7 @@ inline int memcmp16(const Char * a, const Char * b)
     mask = ~mask;
     if (mask)
     {
-        auto offset = __builtin_ctzll(mask) >> 2;
+        auto offset = std::countr_zero(mask) >> 2;
         return detail::cmp(a[offset], b[offset]);
     }
     return 0;
@@ -694,7 +695,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)

         if (mask)
         {
-            offset += __builtin_ctzll(mask) >> 2;
+            offset += std::countr_zero(mask) >> 2;
             return offset >= size;
         }
     }
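The `>> 2` in the later hunks exists because NEON has no byte-wise `movemask`: the helper condenses 16 byte lanes into a 64-bit "nibble mask" with 4 identical bits per byte, so the trailing-zero count must be divided by 4 to get a byte index. A standalone sketch of that arithmetic (illustrative, not the NEON code itself):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // If byte lane 3 is the first match, its nibble occupies bits 12..15,
    // and countr_zero(mask) >> 2 recovers the byte index 3.
    uint64_t mask = 0xFULL << (3 * 4);
    assert((std::countr_zero(mask) >> 2) == 3);
    return 0;
}
```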
@@ -1,5 +1,6 @@
 #include "CompressedReadBufferBase.h"

+#include <bit>
 #include <cstring>
 #include <cassert>
 #include <city.h>
@@ -93,8 +94,8 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
     }

     /// Check if the difference caused by single bit flip in stored checksum.
-    size_t difference = __builtin_popcountll(expected_checksum.first ^ calculated_checksum.first)
-        + __builtin_popcountll(expected_checksum.second ^ calculated_checksum.second);
+    size_t difference = std::popcount(expected_checksum.first ^ calculated_checksum.first)
+        + std::popcount(expected_checksum.second ^ calculated_checksum.second);

     if (difference == 1)
     {
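The logic being touched here distinguishes a corrupted checksum from corrupted data: if the expected and calculated 128-bit checksums differ in exactly one bit, the stored checksum itself most likely took a single bit flip. A tiny standalone illustration (made-up values):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // A 128-bit checksum held as two 64-bit halves. Exactly one differing
    // bit across both halves suggests a one-bit flip in the stored
    // checksum rather than damage to the data it covers.
    uint64_t expected_first = 0xDEADBEEF, calculated_first = 0xDEADBEEB;
    uint64_t expected_second = 42, calculated_second = 42;
    size_t difference = std::popcount(expected_first ^ calculated_first)
        + std::popcount(expected_second ^ calculated_second);
    assert(difference == 1);
    return 0;
}
```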
@@ -8,6 +8,7 @@
 #include <Parsers/ASTFunction.h>
 #include <IO/WriteHelpers.h>
 #include <Core/Types.h>
+#include <bit>


 namespace DB
@@ -413,7 +414,7 @@ UInt32 getValuableBitsNumber(UInt64 min, UInt64 max)
 {
     UInt64 diff_bits = min ^ max;
     if (diff_bits)
-        return 64 - __builtin_clzll(diff_bits);
+        return 64 - std::countl_zero(diff_bits);
     return 0;
 }
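A worked example of what this function computes: the number of low-order bits in which values of the range can still differ, i.e. the bit width of `min ^ max` (standalone, illustrative values):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // 1000 = 0b1111101000 and 1023 = 0b1111111111 agree above bit 4,
    // so min ^ max == 23 and 64 - countl_zero(23) == 5 "valuable" bits.
    uint64_t diff_bits = 1000ULL ^ 1023ULL;
    assert(diff_bits == 23);
    assert(64 - std::countl_zero(diff_bits) == 5);
    return 0;
}
```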
@@ -344,7 +344,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
     M(UInt64, max_temporary_non_const_columns, 0, "", 0) \
     \
     M(UInt64, max_subquery_depth, 100, "", 0) \
-    M(UInt64, max_pipeline_depth, 10000, "", 0) \
+    M(UInt64, max_pipeline_depth, 1000, "", 0) \
     M(UInt64, max_ast_depth, 1000, "Maximum depth of query syntax tree. Checked after parsing.", 0) \
     M(UInt64, max_ast_elements, 50000, "Maximum size of query syntax tree in number of nodes. Checked after parsing.", 0) \
     M(UInt64, max_expanded_ast_elements, 500000, "Maximum size of query syntax tree in number of nodes after expansion of aliases and the asterisk.", 0) \
@@ -34,6 +34,8 @@ namespace ProfileEvents
     extern const Event AIOWriteBytes;
     extern const Event AIORead;
     extern const Event AIOReadBytes;
+    extern const Event FileSync;
+    extern const Event FileSyncElapsedMicroseconds;
 }

 namespace DB
@@ -544,6 +546,9 @@ public:
                 file_path,
                 std::to_string(bytes_written));

+        ProfileEvents::increment(ProfileEvents::FileSync);
+
+        Stopwatch watch;
 #if defined(OS_DARWIN)
         if (::fsync(file.fd) < 0)
             throwFromErrnoWithPath("Cannot fsync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
@@ -551,6 +556,7 @@ public:
         if (::fdatasync(file.fd) < 0)
             throwFromErrnoWithPath("Cannot fdatasync " + file_path, file_path, ErrorCodes::CANNOT_FSYNC);
 #endif
+        ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds());

         current_block_index += buffer_size_in_blocks;
@@ -1,6 +1,8 @@
 #include <Disks/LocalDirectorySyncGuard.h>
+#include <Common/ProfileEvents.h>
 #include <Common/Exception.h>
 #include <Disks/IDisk.h>
+#include <Common/Stopwatch.h>
 #include <fcntl.h> // O_RDWR

 /// OSX does not have O_DIRECTORY
@@ -8,6 +10,12 @@
 #define O_DIRECTORY O_RDWR
 #endif

+namespace ProfileEvents
+{
+    extern const Event DirectorySync;
+    extern const Event DirectorySyncElapsedMicroseconds;
+}
+
 namespace DB
 {

@@ -29,8 +37,12 @@ LocalDirectorySyncGuard::LocalDirectorySyncGuard(const String & full_path)

 LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
 {
+    ProfileEvents::increment(ProfileEvents::DirectorySync);
+
     try
     {
+        Stopwatch watch;
+
 #if defined(OS_DARWIN)
         if (fcntl(fd, F_FULLFSYNC, 0))
             throwFromErrno("Cannot fcntl(F_FULLFSYNC)", ErrorCodes::CANNOT_FSYNC);
@@ -40,6 +52,8 @@ LocalDirectorySyncGuard::~LocalDirectorySyncGuard()
 #endif
         if (-1 == ::close(fd))
             throw Exception("Cannot close file", ErrorCodes::CANNOT_CLOSE_FILE);
+
+        ProfileEvents::increment(ProfileEvents::DirectorySyncElapsedMicroseconds, watch.elapsedMicroseconds());
     }
     catch (...)
     {
@@ -8,6 +8,7 @@
 #include <Functions/IFunction.h>
 #include <IO/WriteBufferFromVector.h>
 #include <IO/WriteHelpers.h>
+#include <bit>


 namespace DB
@@ -285,7 +286,7 @@ public:
     {
         while (x)
         {
-            result_array_values_data.push_back(getTrailingZeroBitsUnsafe(x));
+            result_array_values_data.push_back(std::countr_zero(x));
             x &= (x - 1);
         }
     }
@@ -1,5 +1,6 @@
 #include <Functions/FunctionBinaryArithmetic.h>
 #include <Functions/FunctionFactory.h>
+#include <bit>

 namespace DB
 {
@@ -14,7 +15,7 @@ struct BitHammingDistanceImpl
     static inline NO_SANITIZE_UNDEFINED Result apply(A a, B b)
     {
         UInt64 res = static_cast<UInt64>(a) ^ static_cast<UInt64>(b);
-        return __builtin_popcountll(res);
+        return std::popcount(res);
     }

 #if USE_EMBEDDED_COMPILER
@@ -10,6 +10,7 @@
 #include <IO/Operators.h>
 #include <base/find_symbols.h>
 #include <cstdlib>
+#include <bit>

 #ifdef __SSE2__
 #include <emmintrin.h>
@@ -698,7 +699,7 @@ void readCSVStringInto(Vector & s, ReadBuffer & buf, const FormatSettings::CSV &
             uint16_t bit_mask = _mm_movemask_epi8(eq);
             if (bit_mask)
             {
-                next_pos += __builtin_ctz(bit_mask);
+                next_pos += std::countr_zero(bit_mask);
                 return;
             }
         }
@@ -716,7 +717,7 @@ void readCSVStringInto(Vector & s, ReadBuffer & buf, const FormatSettings::CSV &
             uint64_t bit_mask = get_nibble_mask(eq);
             if (bit_mask)
             {
-                next_pos += __builtin_ctzll(bit_mask) >> 2;
+                next_pos += std::countr_zero(bit_mask) >> 2;
                 return;
             }
         }
@@ -18,6 +18,8 @@ namespace ProfileEvents
     extern const Event WriteBufferFromFileDescriptorWriteFailed;
     extern const Event WriteBufferFromFileDescriptorWriteBytes;
     extern const Event DiskWriteElapsedMicroseconds;
+    extern const Event FileSync;
+    extern const Event FileSyncElapsedMicroseconds;
 }

 namespace CurrentMetrics
@@ -113,12 +115,18 @@ void WriteBufferFromFileDescriptor::sync()
     /// If buffer has pending data - write it.
     next();

+    ProfileEvents::increment(ProfileEvents::FileSync);
+
+    Stopwatch watch;
+
     /// Request OS to sync data with storage medium.
 #if defined(OS_DARWIN)
     int res = ::fsync(fd);
 #else
     int res = ::fdatasync(fd);
 #endif
+    ProfileEvents::increment(ProfileEvents::FileSyncElapsedMicroseconds, watch.elapsedMicroseconds());

     if (-1 == res)
         throwFromErrnoWithPath("Cannot fsync " + getFileName(), getFileName(), ErrorCodes::CANNOT_FSYNC);
 }
@@ -1513,8 +1513,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsBeforeArrayJoin(const NameSet &
     }

     auto res = split(split_nodes);
-    /// Do not remove array joined columns if they are not used.
-    /// res.first->project_input = false;
+    res.second->project_input = project_input;
     return res;
 }
@@ -33,7 +33,7 @@ static UInt32 toPowerOfTwo(UInt32 x)
 {
     if (x <= 1)
         return 1;
-    return static_cast<UInt32>(1) << (32 - __builtin_clz(x - 1));
+    return static_cast<UInt32>(1) << (32 - std::countl_zero(x - 1));
 }

 ConcurrentHashJoin::ConcurrentHashJoin(ContextPtr context_, std::shared_ptr<TableJoin> table_join_, size_t slots_, const Block & right_sample_block, bool any_take_last_row_)
@@ -367,27 +367,6 @@ public:
     // Top-level OpenTelemetry trace context for the query. Makes sense only for a query context.
     OpenTelemetryTraceContext query_trace_context;

-    /// Some counters for current query execution.
-    /// Most of them are workarounds and should be removed in the future.
-    struct KitchenSink
-    {
-        std::atomic<size_t> analyze_counter = 0;
-
-        KitchenSink() = default;
-
-        KitchenSink(const KitchenSink & rhs)
-            : analyze_counter(rhs.analyze_counter.load())
-        {}
-
-        KitchenSink & operator=(const KitchenSink & rhs)
-        {
-            analyze_counter = rhs.analyze_counter.load();
-            return *this;
-        }
-    };
-
-    KitchenSink kitchen_sink;
-
 private:
     using SampleBlockCache = std::unordered_map<std::string, Block>;
     mutable SampleBlockCache sample_block_cache;
@@ -4,6 +4,7 @@

 #include <Common/StringUtils/StringUtils.h>
 #include <Common/UTF8Helpers.h>
+#include <bit>

 #if defined(__SSE2__)
 #include <emmintrin.h>
@@ -122,7 +123,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
     const auto alnum_chars_ranges = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
         '\xFF', '\x80', 'z', 'a', 'Z', 'A', '9', '0');
     // Every bit represents if `haystack` character is in the ranges (1) or not (0)
-    const int result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES));
+    const unsigned result_bitmask = _mm_cvtsi128_si32(_mm_cmpestrm(alnum_chars_ranges, 8, haystack, haystack_length, _SIDD_CMP_RANGES));
 #else
     // NOTE: -1 and +1 required since SSE2 has no `>=` and `<=` instructions on packed 8-bit integers (epi8).
     const auto number_begin = _mm_set1_epi8('0' - 1);
@@ -136,7 +137,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
     // every bit represents if `haystack` character `c` satisfies condition:
     // (c < 0) || (c > '0' - 1 && c < '9' + 1) || (c > 'a' - 1 && c < 'z' + 1) || (c > 'A' - 1 && c < 'Z' + 1)
     // < 0 since _mm_cmplt_epi8 threats chars as SIGNED, and so all chars > 0x80 are negative.
-    const int result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128(
+    const unsigned result_bitmask = _mm_movemask_epi8(_mm_or_si128(_mm_or_si128(_mm_or_si128(
         _mm_cmplt_epi8(haystack, zero),
         _mm_and_si128(_mm_cmpgt_epi8(haystack, number_begin), _mm_cmplt_epi8(haystack, number_end))),
         _mm_and_si128(_mm_cmpgt_epi8(haystack, alpha_lower_begin), _mm_cmplt_epi8(haystack, alpha_lower_end))),
@@ -152,7 +153,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
         continue;
     }

-    const auto token_start_pos_in_current_haystack = getTrailingZeroBitsUnsafe(result_bitmask);
+    const auto token_start_pos_in_current_haystack = std::countr_zero(result_bitmask);
     if (*token_length == 0)
         // new token
         *token_start = *pos + token_start_pos_in_current_haystack;
@@ -160,7 +161,7 @@ bool SplitTokenExtractor::nextInStringPadded(const char * data, size_t length, s
         // end of token starting in one of previous haystacks
         return true;

-    const auto token_bytes_in_current_haystack = getTrailingZeroBitsUnsafe(~(result_bitmask >> token_start_pos_in_current_haystack));
+    const auto token_bytes_in_current_haystack = std::countr_zero(~(result_bitmask >> token_start_pos_in_current_haystack));
     *token_length += token_bytes_in_current_haystack;

     *pos += token_start_pos_in_current_haystack + token_bytes_in_current_haystack;
@@ -98,7 +98,6 @@ namespace ErrorCodes
     extern const int SAMPLING_NOT_SUPPORTED;
     extern const int ILLEGAL_FINAL;
     extern const int ILLEGAL_PREWHERE;
-    extern const int TOO_DEEP_PIPELINE;
     extern const int TOO_MANY_COLUMNS;
     extern const int LOGICAL_ERROR;
     extern const int NOT_IMPLEMENTED;
@@ -499,14 +498,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(

     auto analyze = [&] (bool try_move_to_prewhere)
     {
-        if (context->hasQueryContext())
-        {
-            std::atomic<size_t> & current_query_analyze_count = context->getQueryContext()->kitchen_sink.analyze_counter;
-            ++current_query_analyze_count;
-            if (settings.max_pipeline_depth && current_query_analyze_count >= settings.max_pipeline_depth)
-                throw DB::Exception(ErrorCodes::TOO_DEEP_PIPELINE, "Query analyze overflow. Try to increase `max_pipeline_depth` or simplify the query");
-        }
-
         /// Allow push down and other optimizations for VIEW: replace with subquery and rewrite it.
         ASTPtr view_table;
         if (view)
@@ -645,7 +636,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
     analyze(shouldMoveToPrewhere());

     bool need_analyze_again = false;
-
     if (analysis_result.prewhere_constant_filter_description.always_false || analysis_result.prewhere_constant_filter_description.always_true)
     {
         if (analysis_result.prewhere_constant_filter_description.always_true)
@@ -654,7 +644,6 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         query.setExpression(ASTSelectQuery::Expression::PREWHERE, std::make_shared<ASTLiteral>(0u));
         need_analyze_again = true;
     }
-
     if (analysis_result.where_constant_filter_description.always_false || analysis_result.where_constant_filter_description.always_true)
     {
         if (analysis_result.where_constant_filter_description.always_true)
@@ -430,8 +430,9 @@ MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector<K
     SortDescription sort_description;
     for (size_t i = 0; i < tuple_size; ++i)
     {
-        block_to_sort.insert({ordered_set[i], nullptr, ordered_set[i]->getName()});
-        sort_description.emplace_back(ordered_set[i]->getName(), 1, 1);
+        String column_name = "_" + toString(i);
+        block_to_sort.insert({ordered_set[i], nullptr, column_name});
+        sort_description.emplace_back(column_name, 1, 1);
     }

     sortBlock(block_to_sort, sort_description);
@@ -1,6 +1,7 @@
 #include <string_view>

 #include <Parsers/ExpressionListParsers.h>
+#include <Parsers/ParserSetQuery.h>

 #include <Parsers/ASTAsterisk.h>
 #include <Parsers/ASTExpressionList.h>
@@ -9,6 +10,7 @@
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/ASTSelectQuery.h>
+#include <Parsers/ASTSetQuery.h>
 #include <Parsers/ASTSelectWithUnionQuery.h>
 #include <Parsers/ASTSubquery.h>
 #include <Parsers/ASTTablesInSelectQuery.h>
@@ -603,6 +605,13 @@ bool ParserTableFunctionExpression::parseImpl(Pos & pos, ASTPtr & node, Expected
 {
     if (ParserTableFunctionView().parse(pos, node, expected))
         return true;
+    ParserKeyword s_settings("SETTINGS");
+    if (s_settings.ignore(pos, expected))
+    {
+        ParserSetQuery parser_settings(true);
+        if (parser_settings.parse(pos, node, expected))
+            return true;
+    }
     return elem_parser.parse(pos, node, expected);
 }
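Together with the `TableFunctionExecutable` change further below, this lets a table function invocation carry a trailing `SETTINGS` clause among its arguments. A hypothetical example (the script name, schema, and setting name are placeholders, not taken from this diff):

```sql
SELECT *
FROM executable(
    'my_script.py',      -- hypothetical user script
    'TabSeparated',
    'value UInt64',
    (SELECT 1),
    SETTINGS command_read_timeout = 10000)
```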
@@ -282,7 +282,7 @@ public:
  *
  * SelectQueryInfo is required since the stage can depends on the query
  * (see Distributed() engine and optimize_skip_unused_shards,
- *  see also MergeTree engine and allow_experimental_projection_optimization).
+ *  see also MergeTree engine and projection optimization).
  * And to store optimized cluster (after optimize_skip_unused_shards).
  * It will also store needed stuff for projection query pipeline.
  *
@ -8,6 +8,7 @@
|
||||
#include <base/range.h>
|
||||
#include <Interpreters/castColumn.h>
|
||||
#include <DataTypes/DataTypeNothing.h>
|
||||
#include <bit>
|
||||
|
||||
#ifdef __SSE2__
|
||||
#include <emmintrin.h>
|
||||
@ -473,7 +474,7 @@ size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
|
||||
count += 64;
|
||||
else
|
||||
{
|
||||
count += __builtin_clzll(val);
|
||||
count += std::countl_zero(val);
|
||||
return count;
|
||||
}
|
||||
}
|
||||
@ -507,7 +508,7 @@ size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
|
||||
count += 64;
|
||||
else
|
||||
{
|
||||
count += __builtin_clzll(val);
|
||||
count += std::countl_zero(val);
|
||||
return count;
|
||||
}
|
||||
}
|
||||
@ -531,7 +532,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
|
||||
|
||||
size_t count = 0;
|
||||
|
||||
#if defined(__SSE2__) && defined(__POPCNT__)
|
||||
#if defined(__SSE2__)
|
||||
const __m128i zero16 = _mm_setzero_si128();
|
||||
while (end - begin >= 64)
|
||||
{
|
||||
@ -555,7 +556,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
|
||||
count += 64;
|
||||
else
|
||||
{
|
||||
count += __builtin_clzll(val);
|
||||
count += std::countl_zero(val);
|
||||
return count;
|
||||
}
|
||||
}
|
||||
@ -583,7 +584,7 @@ size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, con
|
||||
count += 64;
|
||||
else
|
||||
{
|
||||
count += __builtin_clzll(val);
|
||||
count += std::countl_zero(val);
|
||||
return count;
|
||||
}
|
||||
}

@ -4,7 +4,10 @@
#include <TableFunctions/TableFunctionFactory.h>
#include <TableFunctions/parseColumnsListForTableFunction.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
#include <Parsers/ASTSetQuery.h>
#include <Parsers/parseQuery.h>
#include <Storages/checkAndGetLiteralArgument.h>
#include <Storages/StorageExecutable.h>
#include <DataTypes/DataTypeFactory.h>
@ -48,7 +51,7 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex
std::vector<String> script_name_with_arguments;
boost::split(script_name_with_arguments, script_name_with_arguments_value, [](char c){ return c == ' '; });

script_name = script_name_with_arguments[0];
script_name = std::move(script_name_with_arguments[0]);
script_name_with_arguments.erase(script_name_with_arguments.begin());
arguments = std::move(script_name_with_arguments);
format = checkAndGetLiteralArgument<String>(args[1], "format");
@ -56,14 +59,26 @@ void TableFunctionExecutable::parseArguments(const ASTPtr & ast_function, Contex

for (size_t i = 3; i < args.size(); ++i)
{
ASTPtr query = args[i]->children.at(0);
if (!query->as<ASTSelectWithUnionQuery>())
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"Table function '{}' argument is invalid input query {}",
getName(),
query->formatForErrorMessage());

input_queries.emplace_back(std::move(query));
if (args[i]->as<ASTSetQuery>())
{
settings_query = std::move(args[i]);
}
else
{
ASTPtr query = args[i]->children.at(0);
if (query->as<ASTSelectWithUnionQuery>())
{
input_queries.emplace_back(std::move(query));
}
else
{
throw Exception(
ErrorCodes::UNSUPPORTED_METHOD,
"Table function '{}' argument is invalid {}",
getName(),
args[i]->formatForErrorMessage());
}
}
}
}

@ -79,6 +94,8 @@ StoragePtr TableFunctionExecutable::executeImpl(const ASTPtr & /*ast_function*/,
ExecutableSettings settings;
settings.script_name = script_name;
settings.script_arguments = arguments;
if (settings_query != nullptr)
settings.applyChanges(settings_query->as<ASTSetQuery>()->changes);

auto storage = std::make_shared<StorageExecutable>(storage_id, format, settings, input_queries, getActualTableStructure(context), ConstraintsDescription{});
storage->startup();

@ -6,6 +6,7 @@ namespace DB
{

class Context;
class ASTSetQuery;

/* executable(script_name_optional_arguments, format, structure, input_query) - creates a temporary storage from executable file
*
@ -32,5 +33,6 @@ private:
String format;
String structure;
std::vector<ASTPtr> input_queries;
ASTPtr settings_query = nullptr;
};
}
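Note: taken together with the parser change above, executable() can now carry its own command settings. The integration test added later in this commit invokes it exactly like this:

SELECT *
FROM executable('input_slow.py', 'TabSeparated', 'value String', (SELECT 1),
    SETTINGS command_termination_timeout = 26, command_read_timeout = 26000, command_write_timeout = 26000);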

@ -39,7 +39,7 @@ def _can_export_binaries(build_config: BuildConfig) -> bool:
return False
if build_config["bundled"] != "bundled":
return False
if build_config["splitted"] == "splitted":
if build_config["libraries"] == "shared":
return False
if build_config["sanitizer"] != "":
return True
@ -68,8 +68,8 @@ def get_packager_cmd(
cmd += f" --build-type={build_config['build_type']}"
if build_config["sanitizer"]:
cmd += f" --sanitizer={build_config['sanitizer']}"
if build_config["splitted"] == "splitted":
cmd += " --split-binary"
if build_config["libraries"] == "shared":
cmd += " --shared-libraries"
if build_config["tidy"] == "enable":
cmd += " --clang-tidy"

@ -37,7 +37,7 @@ class BuildResult:
build_type,
sanitizer,
bundled,
splitted,
libraries,
status,
elapsed_seconds,
with_coverage,
@ -46,7 +46,7 @@ class BuildResult:
self.build_type = build_type
self.sanitizer = sanitizer
self.bundled = bundled
self.splitted = splitted
self.libraries = libraries
self.status = status
self.elapsed_seconds = elapsed_seconds
self.with_coverage = with_coverage
@ -91,7 +91,7 @@ def get_failed_report(
build_type="unknown",
sanitizer="unknown",
bundled="unknown",
splitted="unknown",
libraries="unknown",
status=message,
elapsed_seconds=0,
with_coverage=False,
@ -108,7 +108,7 @@ def process_report(
build_type=build_config["build_type"],
sanitizer=build_config["sanitizer"],
bundled=build_config["bundled"],
splitted=build_config["splitted"],
libraries=build_config["libraries"],
status="success" if build_report["status"] else "failure",
elapsed_seconds=build_report["elapsed_seconds"],
with_coverage=False,

@ -206,7 +206,8 @@ Merge it only if you intend to backport changes to the target branch, otherwise
)
self.cherrypick_pr.add_to_labels(Labels.LABEL_CHERRYPICK)
self.cherrypick_pr.add_to_labels(Labels.LABEL_DO_NOT_TEST)
self.cherrypick_pr.add_to_assignees(self.pr.assignee)
if self.pr.assignee is not None:
self.cherrypick_pr.add_to_assignees(self.pr.assignee)
self.cherrypick_pr.add_to_assignees(self.pr.user)

def create_backport(self):
@ -238,7 +239,8 @@ Merge it only if you intend to backport changes to the target branch, otherwise
head=self.backport_branch,
)
self.backport_pr.add_to_labels(Labels.LABEL_BACKPORT)
self.backport_pr.add_to_assignees(self.pr.assignee)
if self.pr.assignee is not None:
self.backport_pr.add_to_assignees(self.pr.assignee)
self.backport_pr.add_to_assignees(self.pr.user)

@property

@ -14,7 +14,7 @@ CI_CONFIG = {
"package_type": "deb",
"static_binary_name": "amd64",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"additional_pkgs": True,
"tidy": "disable",
"with_coverage": False,
@ -25,7 +25,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "coverity",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
"official": False,
@ -37,7 +37,7 @@ CI_CONFIG = {
"package_type": "deb",
"static_binary_name": "aarch64",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"additional_pkgs": True,
"tidy": "disable",
"with_coverage": False,
@ -48,7 +48,7 @@ CI_CONFIG = {
"sanitizer": "address",
"package_type": "deb",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -58,7 +58,7 @@ CI_CONFIG = {
"sanitizer": "undefined",
"package_type": "deb",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -68,7 +68,7 @@ CI_CONFIG = {
"sanitizer": "thread",
"package_type": "deb",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -78,7 +78,7 @@ CI_CONFIG = {
"sanitizer": "memory",
"package_type": "deb",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -88,7 +88,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "deb",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -98,7 +98,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -109,17 +109,17 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "debug-amd64",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "enable",
"with_coverage": False,
},
"binary_splitted": {
"binary_shared": {
"compiler": "clang-14",
"build_type": "",
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
"splitted": "splitted",
"libraries": "shared",
"tidy": "disable",
"with_coverage": False,
},
@ -130,7 +130,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "macos",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -140,7 +140,7 @@ CI_CONFIG = {
"sanitizer": "",
"package_type": "binary",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -151,7 +151,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "freebsd",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -162,7 +162,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "macos-aarch64",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -173,7 +173,7 @@ CI_CONFIG = {
"package_type": "binary",
"static_binary_name": "powerpc64le",
"bundled": "bundled",
"splitted": "unsplitted",
"libraries": "static",
"tidy": "disable",
"with_coverage": False,
},
@ -192,7 +192,7 @@ CI_CONFIG = {
],
"ClickHouse special build check": [
"binary_tidy",
"binary_splitted",
"binary_shared",
"binary_darwin",
"binary_aarch64",
"binary_freebsd",
@ -297,7 +297,7 @@ CI_CONFIG = {
"required_build": "package_release",
},
"Split build smoke test": {
"required_build": "binary_splitted",
"required_build": "binary_shared",
},
"Unit tests (release-clang)": {
"required_build": "binary_release",

@ -32,7 +32,18 @@ class ClickHouseHelper:
}

for i in range(5):
response = requests.post(url, params=params, data=json_str, headers=auth)
try:
response = requests.post(
url, params=params, data=json_str, headers=auth
)
except Exception as e:
logging.warning(
"Received exception while sending data to %s on %s attempt: %s",
url,
i,
e,
)
continue

logging.info("Response content '%s'", response.content)

@ -290,7 +290,7 @@ tr:hover td {{filter: brightness(95%);}}
<th>Build type</th>
<th>Sanitizer</th>
<th>Bundled</th>
<th>Splitted</th>
<th>Libraries</th>
<th>Status</th>
<th>Build log</th>
<th>Build time</th>
@ -335,7 +335,7 @@ def create_build_html_report(
row += "<td>{}</td>".format("none")

row += "<td>{}</td>".format(build_result.bundled)
row += "<td>{}</td>".format(build_result.splitted)
row += "<td>{}</td>".format(build_result.libraries)

if build_result.status:
style = _get_status_style(build_result.status)

@ -46,5 +46,19 @@
"test_storage_s3/test.py::test_url_reconnect_in_the_middle",
"test_system_metrics/test.py::test_readonly_metrics",
"test_system_replicated_fetches/test.py::test_system_replicated_fetches",
"test_zookeeper_config_load_balancing/test.py::test_round_robin"
"test_zookeeper_config_load_balancing/test.py::test_round_robin",

"test_tlsv1_3/test.py::test_https",
"test_tlsv1_3/test.py::test_https_wrong_cert",
"test_tlsv1_3/test.py::test_https_non_ssl_auth",
"test_tlsv1_3/test.py::test_create_user",
"test_user_ip_restrictions/test.py::test_ipv4",
"test_user_ip_restrictions/test.py::test_ipv6",
"test_ssl_cert_authentication/test.py::test_https",
"test_ssl_cert_authentication/test.py::test_https_wrong_cert",
"test_ssl_cert_authentication/test.py::test_https_non_ssl_auth",
"test_ssl_cert_authentication/test.py::test_create_user",
"test_grpc_protocol_ssl/test.py::test_secure_channel",
"test_grpc_protocol_ssl/test.py::test_insecure_channel",
"test_grpc_protocol_ssl/test.py::test_wrong_client_certificate"
]

@ -163,6 +163,19 @@ def test_executable_function_input_multiple_pipes_python(started_cluster):
assert actual == expected


def test_executable_function_input_slow_python_timeout_increased(started_cluster):
skip_test_msan(node)
query = "SELECT * FROM executable('input_slow.py', 'TabSeparated', 'value String', {source}, SETTINGS {settings})"
settings = "command_termination_timeout = 26, command_read_timeout = 26000, command_write_timeout = 26000"
assert node.query(query.format(source="(SELECT 1)", settings=settings)) == "Key 1\n"
assert (
node.query(
query.format(source="(SELECT id FROM test_data_table)", settings=settings)
)
== "Key 0\nKey 1\nKey 2\n"
)


def test_executable_storage_no_input_bash(started_cluster):
skip_test_msan(node)
node.query("DROP TABLE IF EXISTS test_table")

@ -5,7 +5,8 @@ import grpc
from helpers.cluster import ClickHouseCluster, run_and_check

GRPC_PORT = 9100
NODE_IP = "10.5.172.77"  # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
NODE_IP = "10.5.172.77"  # Never copy-paste this line
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ENCODING = "utf-8"

@ -165,10 +165,7 @@ def test_deduplication_while_move(started_cluster):
assert TSV(
n.query(
"SELECT count() FROM test_deduplication_d",
settings={
"allow_experimental_query_deduplication": 1,
"allow_experimental_projection_optimization": 1,
},
settings={"allow_experimental_query_deduplication": 1},
)
) == TSV("2")

@ -5,7 +5,8 @@ import ssl
import os.path

HTTPS_PORT = 8443
NODE_IP = "10.5.172.77"  # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
NODE_IP = "10.5.172.77"  # Never copy-paste this line
NODE_IP_WITH_HTTPS_PORT = NODE_IP + ":" + str(HTTPS_PORT)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

@ -0,0 +1,3 @@
<clickhouse>
<interserver_listen_host replace="replace">10.0.0.10</interserver_listen_host>
</clickhouse>

@ -0,0 +1,3 @@
<clickhouse>
<interserver_listen_host remove="remove"></interserver_listen_host>
</clickhouse>

@ -0,0 +1,55 @@
"""Test Interserver responses on configured IP."""
from pathlib import Path
import pytest
from helpers.cluster import ClickHouseCluster
import requests
import socket
import time

cluster = ClickHouseCluster(__file__)

INTERSERVER_LISTEN_HOST = "10.0.0.10"
INTERSERVER_HTTP_PORT = 9009

node_with_interserver_listen_host = cluster.add_instance(
"node_with_interserver_listen_host",
main_configs=["configs/config.d/interserver-listen-host.xml"],
ipv4_address=INTERSERVER_LISTEN_HOST,  # used to configure acc. interface in test container
ipv6_address="2001:3984:3989::1:1000",
)

node_without_interserver_listen_host = cluster.add_instance(
"node_without_interserver_listen_host",
main_configs=["configs/config.d/no-interserver-listen-host.xml"],
ipv6_address="2001:3984:3989::2:1000",
)


@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster

finally:
cluster.shutdown()


def test_request_to_node_with_interserver_listen_host(start_cluster):
time.sleep(5)  # waiting for interserver listener to start
response_interserver = requests.get(
f"http://{INTERSERVER_LISTEN_HOST}:{INTERSERVER_HTTP_PORT}"
)
response_client = requests.get(
f"http://{node_without_interserver_listen_host.ip_address}:8123"
)
assert response_interserver.status_code == 200
assert "Ok." in response_interserver.text
assert response_client.status_code == 200


def test_request_to_node_without_interserver_listen_host(start_cluster):
response = requests.get(
f"http://{node_without_interserver_listen_host.ip_address}:{INTERSERVER_HTTP_PORT}"
)
assert response.status_code == 200

@ -5,7 +5,8 @@ import ssl
import os.path

HTTPS_PORT = 8443
NODE_IP = "10.5.172.77"  # It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
# It's important for the node to work at this IP because 'server-cert.pem' requires that (see server-ext.cnf).
NODE_IP = "10.5.172.77"  # Never copy-paste this line
NODE_IP_WITH_HTTPS_PORT = NODE_IP + ":" + str(HTTPS_PORT)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))

@ -8,47 +8,56 @@ node_ipv4 = cluster.add_instance(
"node_ipv4",
main_configs=[],
user_configs=["configs/users_ipv4.xml"],
ipv4_address="10.5.172.77",
ipv4_address="10.5.172.77",  # Never copy-paste this line
)
client_ipv4_ok = cluster.add_instance(
"client_ipv4_ok", main_configs=[], user_configs=[], ipv4_address="10.5.172.10"
"client_ipv4_ok",
main_configs=[],
user_configs=[],
ipv4_address="10.5.172.10",  # Never copy-paste this line
)
client_ipv4_ok_direct = cluster.add_instance(
"client_ipv4_ok_direct", main_configs=[], user_configs=[], ipv4_address="10.5.173.1"
"client_ipv4_ok_direct",
main_configs=[],
user_configs=[],
ipv4_address="10.5.173.1",  # Never copy-paste this line
)
client_ipv4_ok_full_mask = cluster.add_instance(
"client_ipv4_ok_full_mask",
main_configs=[],
user_configs=[],
ipv4_address="10.5.175.77",
ipv4_address="10.5.175.77",  # Never copy-paste this line
)
client_ipv4_bad = cluster.add_instance(
"client_ipv4_bad", main_configs=[], user_configs=[], ipv4_address="10.5.173.10"
"client_ipv4_bad",
main_configs=[],
user_configs=[],
ipv4_address="10.5.173.10",  # Never copy-paste this line
)

node_ipv6 = cluster.add_instance(
"node_ipv6",
main_configs=["configs/config_ipv6.xml"],
user_configs=["configs/users_ipv6.xml"],
ipv6_address="2001:3984:3989::1:1000",
ipv6_address="2001:3984:3989::1:1000",  # Never copy-paste this line
)
client_ipv6_ok = cluster.add_instance(
"client_ipv6_ok",
main_configs=[],
user_configs=[],
ipv6_address="2001:3984:3989::5555",
ipv6_address="2001:3984:3989::5555",  # Never copy-paste this line
)
client_ipv6_ok_direct = cluster.add_instance(
"client_ipv6_ok_direct",
main_configs=[],
user_configs=[],
ipv6_address="2001:3984:3989::1:1111",
ipv6_address="2001:3984:3989::1:1111",  # Never copy-paste this line
)
client_ipv6_bad = cluster.add_instance(
"client_ipv6_bad",
main_configs=[],
user_configs=[],
ipv6_address="2001:3984:3989::1:1112",
ipv6_address="2001:3984:3989::1:1112",  # Never copy-paste this line
)

@ -28,3 +28,8 @@ WHERE number IN
SELECT number
FROM numbers(5)
) order by label, number;

SELECT NULL FROM
(SELECT [1048575, NULL] AS ax, 2147483648 AS c) t1 ARRAY JOIN ax
INNER JOIN (SELECT NULL AS c) t2 USING (c);

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Tags: long, no-parallel
# Tags: disabled
# Tag: no-parallel - too heavy
# Tag: long - too heavy

@ -33,14 +33,11 @@ $CLICKHOUSE_CLIENT "${client_opts[@]}" -nm -q "
create materialized view mv_02232 to out_02232 as select * from in_02232;
"

# 600 is the default timeout of clickhouse-test, and 30 is just a safe padding,
# to avoid hung query check triggering
insert_timeout=$((600-30))
# Increase timeouts to avoid timeout during trying to send Log packet to
# the remote side, when the socket is full.
insert_client_opts=(
--send_timeout "$insert_timeout"
--receive_timeout "$insert_timeout"
# Increase timeouts to avoid timeouts when trying to send a Log packet to
# the remote side while the socket is full.
--send_timeout 86400
--receive_timeout 86400
)
# 250 seconds is enough to trigger the hung-query check (even in debug build)
#

@ -1,11 +1,11 @@
-- { echoOn }
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=0, optimize_read_in_order=0;
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=0, optimize_read_in_order=0;
15 480
14 450
13 420
12 390
11 360
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, optimize_read_in_order=1;
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=1, optimize_read_in_order=1;
15 480
14 450
13 420

@ -6,8 +6,8 @@ create table test_agg_proj_02302 (x Int32, y Int32, PROJECTION x_plus_y (select
insert into test_agg_proj_02302 select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100);

-- { echoOn }
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=0, optimize_read_in_order=0;
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings allow_experimental_projection_optimization=1, optimize_aggregation_in_order=1, optimize_read_in_order=1;
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=0, optimize_read_in_order=0;
select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=1, optimize_read_in_order=1;

-- { echoOff }
drop table test_agg_proj_02302;

@ -85,11 +85,11 @@ select distinct 1 as x, 2 as y from distinct_in_order order by x;
1 2
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
1 2
select distinct a, 1 as x from distinct_in_order order by x;
select a, x from (select distinct a, 1 as x from distinct_in_order order by x) order by a;
0 1
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
0 1 2
select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
select a, b, x, y from(select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
0 0 1 2
0 1 1 2
0 2 1 2
@ -97,10 +97,10 @@ select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
0 4 1 2
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
1 2
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
0 0 1 2
0 1 1 2
0 2 1 2
0 3 1 2
0 4 1 2
-- check that distinct in order has the same result as ordinary distinct
-- check that distinct in order returns the same result as ordinary distinct

@ -48,16 +48,16 @@ select '-- distinct with constants columns';
select distinct 1 as x, 2 as y from distinct_in_order;
select distinct 1 as x, 2 as y from distinct_in_order order by x;
select distinct 1 as x, 2 as y from distinct_in_order order by x, y;
select distinct a, 1 as x from distinct_in_order order by x;
select a, x from (select distinct a, 1 as x from distinct_in_order order by x) order by a;
select distinct a, 1 as x, 2 as y from distinct_in_order order by a;
select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a;
select a, b, x, y from(select distinct a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y;
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by b;
select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b;
-- { echoOff }

drop table if exists distinct_in_order sync;

select '-- check that distinct in order has the same result as ordinary distinct';
select '-- check that distinct in order returns the same result as ordinary distinct';
drop table if exists distinct_cardinality_low sync;
CREATE TABLE distinct_cardinality_low (low UInt64, medium UInt64, high UInt64) ENGINE MergeTree() ORDER BY (low, medium);
INSERT INTO distinct_cardinality_low SELECT number % 1e1, number % 1e2, number % 1e3 FROM numbers_mt(1e4);

@ -1,15 +0,0 @@
-- Tags: long

-- https://github.com/ClickHouse/ClickHouse/issues/21557

SET max_pipeline_depth = 1000;

EXPLAIN SYNTAX
WITH
x AS ( SELECT number FROM numbers(10) ),
cross_sales AS (
SELECT 1 AS xx
FROM x, x AS d1, x AS d2, x AS d3, x AS d4, x AS d5, x AS d6, x AS d7, x AS d8, x AS d9
WHERE x.number = d9.number
)
SELECT xx FROM cross_sales WHERE xx = 2000; -- { serverError TOO_DEEP_PIPELINE }

@ -1,2 +1,3 @@
ASCII text
ASCII text
ASCII text

@ -26,8 +26,6 @@ EOF

run "$CLICKHOUSE_CLIENT -q 'SELECT 1' 2>$file_name"
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=$file_name"

# This query may fail due to bug in clickhouse-client.
# run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"
run "$CLICKHOUSE_CLIENT -q 'SELECT 1' --server_logs_file=- >$file_name"

rm -f "$file_name"

73
tests/queries/0_stateless/02361_fsync_profile_events.sh
Executable file
@ -0,0 +1,73 @@
#!/usr/bin/env bash
# Tags: no-s3-storage
# Tag no-s3-storage: s3 does not have fsync

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

$CLICKHOUSE_CLIENT -nm -q "
drop table if exists data_fsync_pe;

create table data_fsync_pe (key Int) engine=MergeTree()
order by key
settings
min_rows_for_wide_part=2,
fsync_after_insert=1,
fsync_part_directory=1;
"

ret=1
# Retry in case fsync/fdatasync was too fast
# (FileSyncElapsedMicroseconds/DirectorySyncElapsedMicroseconds was 0)
for i in {1..100}; do
query_id="insert-$i-$CLICKHOUSE_DATABASE"

$CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data_fsync_pe values (1)"

read -r FileSync FileOpen DirectorySync FileSyncElapsedMicroseconds DirectorySyncElapsedMicroseconds <<<"$(
$CLICKHOUSE_CLIENT -nm --param_query_id "$query_id" -q "
system flush logs;

select
ProfileEvents['FileSync'],
ProfileEvents['FileOpen'],
ProfileEvents['DirectorySync'],
ProfileEvents['FileSyncElapsedMicroseconds']>0,
ProfileEvents['DirectorySyncElapsedMicroseconds']>0
from system.query_log
where
event_date >= yesterday() and
current_database = currentDatabase() and
query_id = {query_id:String} and
type = 'QueryFinish';
")"

# Non-retriable errors
if [[ $FileSync -ne 7 ]]; then
exit 2
fi
# Check that all files were synced
if [[ $FileSync -ne $FileOpen ]]; then
exit 3
fi
if [[ $DirectorySync -ne 2 ]]; then
exit 4
fi

# Retriable errors
if [[ $FileSyncElapsedMicroseconds -eq 0 ]]; then
continue
fi
if [[ $DirectorySyncElapsedMicroseconds -eq 0 ]]; then
continue
fi

# Everything is OK
ret=0
break
done

$CLICKHOUSE_CLIENT -q "drop table data_fsync_pe"

exit $ret
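
Note: outside the test harness, the same counters can be inspected with a plain query (the expected values 7 and 2 above are specific to this test's single insert, not general constants):

SYSTEM FLUSH LOGS;
SELECT ProfileEvents['FileSync'], ProfileEvents['FileOpen'], ProfileEvents['DirectorySync']
FROM system.query_log
WHERE current_database = currentDatabase() AND type = 'QueryFinish'
ORDER BY event_time DESC
LIMIT 1;
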
2
tests/queries/0_stateless/02374_in_tuple_index.reference
Normal file
@ -0,0 +1,2 @@
2
2

23
tests/queries/0_stateless/02374_in_tuple_index.sql
Normal file
@ -0,0 +1,23 @@
DROP TABLE IF EXISTS t_in_tuple_index;

CREATE TABLE t_in_tuple_index
(
`ID` String,
`USER_ID` String,
`PLATFORM` LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY (PLATFORM, USER_ID, ID)
SETTINGS index_granularity = 2048;

INSERT INTO t_in_tuple_index VALUES ('1', 33, 'insta'), ('2', 33, 'insta');

SELECT count()
FROM t_in_tuple_index
WHERE (PLATFORM, USER_ID) IN (('insta', '33'));

SELECT count()
FROM t_in_tuple_index
WHERE (PLATFORM, USER_ID) IN (('insta', '33'), ('insta', '22'));

DROP TABLE IF EXISTS t_in_tuple_index;