Merge branch 'master' into liangliangpan-patch-fix-alter-ttl-error

Commit 321c987a51, authored by Alexander Gololobov, 2023-01-09 15:17:12 +01:00, committed by GitHub.
373 changed files with 5119 additions and 2541 deletions

.gitmodules (vendored): 172 lines changed

@ -1,88 +1,88 @@
[submodule "contrib/poco"]
path = contrib/poco
url = https://github.com/ClickHouse/poco.git
url = https://github.com/ClickHouse/poco
branch = clickhouse
[submodule "contrib/zstd"]
path = contrib/zstd
url = https://github.com/facebook/zstd.git
url = https://github.com/facebook/zstd
[submodule "contrib/lz4"]
path = contrib/lz4
url = https://github.com/lz4/lz4.git
url = https://github.com/lz4/lz4
[submodule "contrib/librdkafka"]
path = contrib/librdkafka
url = https://github.com/ClickHouse/librdkafka.git
url = https://github.com/ClickHouse/librdkafka
[submodule "contrib/cctz"]
path = contrib/cctz
url = https://github.com/ClickHouse/cctz.git
url = https://github.com/ClickHouse/cctz
[submodule "contrib/zlib-ng"]
path = contrib/zlib-ng
url = https://github.com/ClickHouse/zlib-ng.git
url = https://github.com/ClickHouse/zlib-ng
branch = clickhouse-2.0.x
[submodule "contrib/googletest"]
path = contrib/googletest
url = https://github.com/google/googletest.git
url = https://github.com/google/googletest
[submodule "contrib/capnproto"]
path = contrib/capnproto
url = https://github.com/capnproto/capnproto.git
url = https://github.com/capnproto/capnproto
[submodule "contrib/double-conversion"]
path = contrib/double-conversion
url = https://github.com/google/double-conversion.git
url = https://github.com/google/double-conversion
[submodule "contrib/re2"]
path = contrib/re2
url = https://github.com/google/re2.git
url = https://github.com/google/re2
[submodule "contrib/mariadb-connector-c"]
path = contrib/mariadb-connector-c
url = https://github.com/ClickHouse/mariadb-connector-c.git
url = https://github.com/ClickHouse/mariadb-connector-c
[submodule "contrib/jemalloc"]
path = contrib/jemalloc
url = https://github.com/jemalloc/jemalloc.git
url = https://github.com/jemalloc/jemalloc
[submodule "contrib/unixodbc"]
path = contrib/unixodbc
url = https://github.com/ClickHouse/UnixODBC.git
url = https://github.com/ClickHouse/UnixODBC
[submodule "contrib/protobuf"]
path = contrib/protobuf
url = https://github.com/ClickHouse/protobuf.git
url = https://github.com/ClickHouse/protobuf
branch = v3.13.0.1
[submodule "contrib/boost"]
path = contrib/boost
url = https://github.com/ClickHouse/boost.git
url = https://github.com/ClickHouse/boost
[submodule "contrib/base64"]
path = contrib/base64
url = https://github.com/ClickHouse/Turbo-Base64.git
url = https://github.com/ClickHouse/Turbo-Base64
[submodule "contrib/arrow"]
path = contrib/arrow
url = https://github.com/ClickHouse/arrow.git
url = https://github.com/ClickHouse/arrow
branch = blessed/release-6.0.1
[submodule "contrib/thrift"]
path = contrib/thrift
url = https://github.com/apache/thrift.git
url = https://github.com/apache/thrift
[submodule "contrib/libhdfs3"]
path = contrib/libhdfs3
url = https://github.com/ClickHouse/libhdfs3.git
url = https://github.com/ClickHouse/libhdfs3
[submodule "contrib/libxml2"]
path = contrib/libxml2
url = https://github.com/GNOME/libxml2.git
url = https://github.com/GNOME/libxml2
[submodule "contrib/libgsasl"]
path = contrib/libgsasl
url = https://github.com/ClickHouse/libgsasl.git
url = https://github.com/ClickHouse/libgsasl
[submodule "contrib/snappy"]
path = contrib/snappy
url = https://github.com/ClickHouse/snappy.git
url = https://github.com/ClickHouse/snappy
[submodule "contrib/cppkafka"]
path = contrib/cppkafka
url = https://github.com/mfontanini/cppkafka.git
url = https://github.com/mfontanini/cppkafka
[submodule "contrib/brotli"]
path = contrib/brotli
url = https://github.com/google/brotli.git
url = https://github.com/google/brotli
[submodule "contrib/h3"]
path = contrib/h3
url = https://github.com/ClickHouse/h3
[submodule "contrib/libunwind"]
path = contrib/libunwind
url = https://github.com/ClickHouse/libunwind.git
url = https://github.com/ClickHouse/libunwind
[submodule "contrib/simdjson"]
path = contrib/simdjson
url = https://github.com/simdjson/simdjson.git
url = https://github.com/simdjson/simdjson
[submodule "contrib/rapidjson"]
path = contrib/rapidjson
url = https://github.com/ClickHouse/rapidjson
@ -94,68 +94,68 @@
url = https://github.com/ClickHouse/orc
[submodule "contrib/sparsehash-c11"]
path = contrib/sparsehash-c11
url = https://github.com/sparsehash/sparsehash-c11.git
url = https://github.com/sparsehash/sparsehash-c11
[submodule "contrib/grpc"]
path = contrib/grpc
url = https://github.com/ClickHouse/grpc.git
url = https://github.com/ClickHouse/grpc
branch = v1.33.2
[submodule "contrib/aws"]
path = contrib/aws
url = https://github.com/ClickHouse/aws-sdk-cpp.git
url = https://github.com/ClickHouse/aws-sdk-cpp
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
url = https://github.com/awslabs/aws-c-event-stream.git
url = https://github.com/awslabs/aws-c-event-stream
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common.git
url = https://github.com/ClickHouse/aws-c-common
[submodule "aws-checksums"]
path = contrib/aws-checksums
url = https://github.com/awslabs/aws-checksums.git
url = https://github.com/awslabs/aws-checksums
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git
url = https://github.com/curl/curl
[submodule "contrib/icudata"]
path = contrib/icudata
url = https://github.com/ClickHouse/icudata.git
url = https://github.com/ClickHouse/icudata
[submodule "contrib/icu"]
path = contrib/icu
url = https://github.com/unicode-org/icu.git
url = https://github.com/unicode-org/icu
[submodule "contrib/flatbuffers"]
path = contrib/flatbuffers
url = https://github.com/ClickHouse/flatbuffers.git
url = https://github.com/ClickHouse/flatbuffers
[submodule "contrib/replxx"]
path = contrib/replxx
url = https://github.com/ClickHouse/replxx.git
url = https://github.com/ClickHouse/replxx
[submodule "contrib/avro"]
path = contrib/avro
url = https://github.com/ClickHouse/avro.git
url = https://github.com/ClickHouse/avro
ignore = untracked
[submodule "contrib/msgpack-c"]
path = contrib/msgpack-c
url = https://github.com/msgpack/msgpack-c
[submodule "contrib/libcpuid"]
path = contrib/libcpuid
url = https://github.com/ClickHouse/libcpuid.git
url = https://github.com/ClickHouse/libcpuid
[submodule "contrib/openldap"]
path = contrib/openldap
url = https://github.com/ClickHouse/openldap.git
url = https://github.com/ClickHouse/openldap
[submodule "contrib/AMQP-CPP"]
path = contrib/AMQP-CPP
url = https://github.com/ClickHouse/AMQP-CPP.git
url = https://github.com/ClickHouse/AMQP-CPP
[submodule "contrib/cassandra"]
path = contrib/cassandra
url = https://github.com/ClickHouse/cpp-driver.git
url = https://github.com/ClickHouse/cpp-driver
branch = clickhouse
[submodule "contrib/libuv"]
path = contrib/libuv
url = https://github.com/ClickHouse/libuv.git
url = https://github.com/ClickHouse/libuv
branch = clickhouse
[submodule "contrib/fmtlib"]
path = contrib/fmtlib
url = https://github.com/fmtlib/fmt.git
url = https://github.com/fmtlib/fmt
[submodule "contrib/sentry-native"]
path = contrib/sentry-native
url = https://github.com/ClickHouse/sentry-native.git
url = https://github.com/ClickHouse/sentry-native
[submodule "contrib/krb5"]
path = contrib/krb5
url = https://github.com/ClickHouse/krb5
@ -172,17 +172,17 @@
url = https://github.com/danlark1/miniselect
[submodule "contrib/rocksdb"]
path = contrib/rocksdb
url = https://github.com/ClickHouse/rocksdb.git
url = https://github.com/ClickHouse/rocksdb
[submodule "contrib/xz"]
path = contrib/xz
url = https://github.com/xz-mirror/xz
[submodule "contrib/abseil-cpp"]
path = contrib/abseil-cpp
url = https://github.com/abseil/abseil-cpp.git
url = https://github.com/abseil/abseil-cpp
branch = lts_2021_11_02
[submodule "contrib/dragonbox"]
path = contrib/dragonbox
url = https://github.com/ClickHouse/dragonbox.git
url = https://github.com/ClickHouse/dragonbox
[submodule "contrib/fast_float"]
path = contrib/fast_float
url = https://github.com/fastfloat/fast_float
@ -191,44 +191,44 @@
url = https://github.com/ClickHouse/libpq
[submodule "contrib/boringssl"]
path = contrib/boringssl
url = https://github.com/ClickHouse/boringssl.git
url = https://github.com/ClickHouse/boringssl
branch = unknown_branch_from_artur
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse/NuRaft.git
url = https://github.com/ClickHouse/NuRaft
[submodule "contrib/nanodbc"]
path = contrib/nanodbc
url = https://github.com/ClickHouse/nanodbc.git
url = https://github.com/ClickHouse/nanodbc
[submodule "contrib/datasketches-cpp"]
path = contrib/datasketches-cpp
url = https://github.com/ClickHouse/datasketches-cpp.git
url = https://github.com/ClickHouse/datasketches-cpp
[submodule "contrib/yaml-cpp"]
path = contrib/yaml-cpp
url = https://github.com/ClickHouse/yaml-cpp.git
url = https://github.com/ClickHouse/yaml-cpp
[submodule "contrib/cld2"]
path = contrib/cld2
url = https://github.com/ClickHouse/cld2.git
url = https://github.com/ClickHouse/cld2
[submodule "contrib/libstemmer_c"]
path = contrib/libstemmer_c
url = https://github.com/ClickHouse/libstemmer_c.git
url = https://github.com/ClickHouse/libstemmer_c
[submodule "contrib/wordnet-blast"]
path = contrib/wordnet-blast
url = https://github.com/ClickHouse/wordnet-blast.git
url = https://github.com/ClickHouse/wordnet-blast
[submodule "contrib/lemmagen-c"]
path = contrib/lemmagen-c
url = https://github.com/ClickHouse/lemmagen-c.git
url = https://github.com/ClickHouse/lemmagen-c
[submodule "contrib/libpqxx"]
path = contrib/libpqxx
url = https://github.com/ClickHouse/libpqxx.git
url = https://github.com/ClickHouse/libpqxx
[submodule "contrib/sqlite-amalgamation"]
path = contrib/sqlite-amalgamation
url = https://github.com/azadkuh/sqlite-amalgamation
url = https://github.com/ClickHouse/sqlite-amalgamation
[submodule "contrib/s2geometry"]
path = contrib/s2geometry
url = https://github.com/ClickHouse/s2geometry.git
url = https://github.com/ClickHouse/s2geometry
[submodule "contrib/bzip2"]
path = contrib/bzip2
url = https://github.com/ClickHouse/bzip2.git
url = https://github.com/ClickHouse/bzip2
[submodule "contrib/magic_enum"]
path = contrib/magic_enum
url = https://github.com/Neargye/magic_enum
@ -237,93 +237,93 @@
url = https://github.com/google/libprotobuf-mutator
[submodule "contrib/sysroot"]
path = contrib/sysroot
url = https://github.com/ClickHouse/sysroot.git
url = https://github.com/ClickHouse/sysroot
[submodule "contrib/nlp-data"]
path = contrib/nlp-data
url = https://github.com/ClickHouse/nlp-data.git
url = https://github.com/ClickHouse/nlp-data
[submodule "contrib/hive-metastore"]
path = contrib/hive-metastore
url = https://github.com/ClickHouse/hive-metastore
[submodule "contrib/azure"]
path = contrib/azure
url = https://github.com/ClickHouse/azure-sdk-for-cpp.git
url = https://github.com/ClickHouse/azure-sdk-for-cpp
[submodule "contrib/minizip-ng"]
path = contrib/minizip-ng
url = https://github.com/zlib-ng/minizip-ng
[submodule "contrib/annoy"]
path = contrib/annoy
url = https://github.com/ClickHouse/annoy.git
url = https://github.com/ClickHouse/annoy
branch = ClickHouse-master
[submodule "contrib/qpl"]
path = contrib/qpl
url = https://github.com/intel/qpl.git
url = https://github.com/intel/qpl
[submodule "contrib/wyhash"]
path = contrib/wyhash
url = https://github.com/wangyi-fudan/wyhash.git
url = https://github.com/wangyi-fudan/wyhash
[submodule "contrib/hashidsxx"]
path = contrib/hashidsxx
url = https://github.com/schoentoon/hashidsxx.git
url = https://github.com/schoentoon/hashidsxx
[submodule "contrib/nats-io"]
path = contrib/nats-io
url = https://github.com/ClickHouse/nats.c.git
url = https://github.com/ClickHouse/nats.c
[submodule "contrib/vectorscan"]
path = contrib/vectorscan
url = https://github.com/VectorCamp/vectorscan.git
url = https://github.com/VectorCamp/vectorscan
[submodule "contrib/c-ares"]
path = contrib/c-ares
url = https://github.com/ClickHouse/c-ares
[submodule "contrib/llvm-project"]
path = contrib/llvm-project
url = https://github.com/ClickHouse/llvm-project.git
url = https://github.com/ClickHouse/llvm-project
[submodule "contrib/corrosion"]
path = contrib/corrosion
url = https://github.com/corrosion-rs/corrosion.git
url = https://github.com/corrosion-rs/corrosion
[submodule "contrib/morton-nd"]
path = contrib/morton-nd
url = https://github.com/morton-nd/morton-nd
[submodule "contrib/xxHash"]
path = contrib/xxHash
url = https://github.com/Cyan4973/xxHash.git
url = https://github.com/Cyan4973/xxHash
[submodule "contrib/crc32-s390x"]
path = contrib/crc32-s390x
url = https://github.com/linux-on-ibm-z/crc32-s390x.git
url = https://github.com/linux-on-ibm-z/crc32-s390x
[submodule "contrib/openssl"]
path = contrib/openssl
url = https://github.com/openssl/openssl
branch = openssl-3.0
[submodule "contrib/google-benchmark"]
path = contrib/google-benchmark
url = https://github.com/google/benchmark.git
url = https://github.com/google/benchmark
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide.git
url = https://github.com/ridiculousfish/libdivide
[submodule "contrib/aws-crt-cpp"]
path = contrib/aws-crt-cpp
url = https://github.com/ClickHouse/aws-crt-cpp.git
url = https://github.com/ClickHouse/aws-crt-cpp
[submodule "contrib/aws-c-io"]
path = contrib/aws-c-io
url = https://github.com/ClickHouse/aws-c-io.git
url = https://github.com/ClickHouse/aws-c-io
[submodule "contrib/aws-c-mqtt"]
path = contrib/aws-c-mqtt
url = https://github.com/awslabs/aws-c-mqtt.git
url = https://github.com/awslabs/aws-c-mqtt
[submodule "contrib/aws-c-auth"]
path = contrib/aws-c-auth
url = https://github.com/awslabs/aws-c-auth.git
url = https://github.com/awslabs/aws-c-auth
[submodule "contrib/aws-c-cal"]
path = contrib/aws-c-cal
url = https://github.com/ClickHouse/aws-c-cal.git
url = https://github.com/ClickHouse/aws-c-cal
[submodule "contrib/aws-c-sdkutils"]
path = contrib/aws-c-sdkutils
url = https://github.com/awslabs/aws-c-sdkutils.git
url = https://github.com/awslabs/aws-c-sdkutils
[submodule "contrib/aws-c-http"]
path = contrib/aws-c-http
url = https://github.com/awslabs/aws-c-http.git
url = https://github.com/awslabs/aws-c-http
[submodule "contrib/aws-c-s3"]
path = contrib/aws-c-s3
url = https://github.com/awslabs/aws-c-s3.git
url = https://github.com/awslabs/aws-c-s3
[submodule "contrib/aws-c-compression"]
path = contrib/aws-c-compression
url = https://github.com/awslabs/aws-c-compression.git
url = https://github.com/awslabs/aws-c-compression
[submodule "contrib/aws-s2n-tls"]
path = contrib/aws-s2n-tls
url = https://github.com/aws/s2n-tls.git
url = https://github.com/ClickHouse/s2n-tls

base/base/IPv4andIPv6.h (new file): 53 lines

@ -0,0 +1,53 @@
#pragma once
#include <base/strong_typedef.h>
#include <base/extended_types.h>
#include <Common/memcmpSmall.h>
namespace DB
{
using IPv4 = StrongTypedef<UInt32, struct IPv4Tag>;
struct IPv6 : StrongTypedef<UInt128, struct IPv6Tag>
{
constexpr IPv6() = default;
constexpr explicit IPv6(const UInt128 & x) : StrongTypedef(x) {}
constexpr explicit IPv6(UInt128 && x) : StrongTypedef(std::move(x)) {}
IPv6 & operator=(const UInt128 & rhs) { StrongTypedef::operator=(rhs); return *this; }
IPv6 & operator=(UInt128 && rhs) { StrongTypedef::operator=(std::move(rhs)); return *this; }
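/// Relational operators compare the raw 16 bytes of the address via memcmp16, i.e. lexicographic byte order.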
bool operator<(const IPv6 & rhs) const
{
return
memcmp16(
reinterpret_cast<const unsigned char *>(toUnderType().items),
reinterpret_cast<const unsigned char *>(rhs.toUnderType().items)
) < 0;
}
bool operator>(const IPv6 & rhs) const
{
return
memcmp16(
reinterpret_cast<const unsigned char *>(toUnderType().items),
reinterpret_cast<const unsigned char *>(rhs.toUnderType().items)
) > 0;
}
bool operator==(const IPv6 & rhs) const
{
return
memcmp16(
reinterpret_cast<const unsigned char *>(toUnderType().items),
reinterpret_cast<const unsigned char *>(rhs.toUnderType().items)
) == 0;
}
bool operator<=(const IPv6 & rhs) const { return !operator>(rhs); }
bool operator>=(const IPv6 & rhs) const { return !operator<(rhs); }
bool operator!=(const IPv6 & rhs) const { return !operator==(rhs); }
};
}
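These operators back comparisons of the native IPv4/IPv6 types at the SQL level. A minimal sketch of the resulting semantics, assuming the types are exposed as in recent ClickHouse releases:

```sql
-- Comparison is byte-wise over the 16-byte address:
SELECT toIPv6('2001:db8::1') < toIPv6('2001:db8::2'); -- 1
-- Different textual spellings of one address compare equal after parsing:
SELECT toIPv6('::1') = toIPv6('0:0:0:0:0:0:0:1');     -- 1
```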


@ -2,6 +2,7 @@
#include "Decimal.h"
#include "UUID.h"
#include "IPv4andIPv6.h"
namespace DB
{
@ -35,6 +36,8 @@ TN_MAP(Float32)
TN_MAP(Float64)
TN_MAP(String)
TN_MAP(UUID)
TN_MAP(IPv4)
TN_MAP(IPv6)
TN_MAP(Decimal32)
TN_MAP(Decimal64)
TN_MAP(Decimal128)

contrib/aws-s2n-tls (vendored submodule): 2 lines changed

@ -1 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526
Subproject commit 0f1ba9e5c4a67cb3898de0c0b4f911d4194dc8de

@ -1 +1 @@
Subproject commit 9818baa5d027ffb26d57f810dc4c597d4946781c
Subproject commit 400ad7152a0c7ee07756d96ab4f6a8f6d1080916


@ -63,10 +63,6 @@
"name": "clickhouse/integration-tests-runner",
"dependent": []
},
"docker/test/testflows/runner": {
"name": "clickhouse/testflows-runner",
"dependent": []
},
"docker/test/fasttest": {
"name": "clickhouse/fasttest",
"dependent": []


@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="22.12.1.1752"
ARG VERSION="22.12.2.25"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.


@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="22.12.1.1752"
ARG VERSION="22.12.2.25"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image


@ -9,6 +9,8 @@ RUN apt-get update \
netbase \
perl \
pv \
ripgrep \
zstd \
--yes --no-install-recommends
# Sanitizer options for services (clickhouse-server)


@ -2,6 +2,13 @@
<profiles>
<default>
<max_execution_time>10</max_execution_time>
<max_memory_usage>10G</max_memory_usage>
<!--
Otherwise we will get TOO_MANY_SIMULTANEOUS_QUERIES errors;
they are OK, but they complicate debugging.
-->
<table_function_remote_max_addresses>200</table_function_remote_max_addresses>
<!--
Don't let the fuzzer change this setting (I've actually seen it
@ -20,6 +27,10 @@
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
<table_function_remote_max_addresses>
<max>200</max>
</table_function_remote_max_addresses>
</constraints>
</default>
</profiles>


@ -241,13 +241,29 @@ quit
# clickhouse-client. We don't check for existence of server process, because
# the process is still present while the server is terminating and not
# accepting the connections anymore.
if clickhouse-client --query "select 1 format Null"
for _ in {1..100}
do
if clickhouse-client --query "SELECT 1" 2> err
then
server_died=0
break
else
# There are legitimate queries leading to this error, example:
# SELECT * FROM remote('127.0.0.{1..255}', system, one)
if grep -F 'TOO_MANY_SIMULTANEOUS_QUERIES' err
then
# Give it some time to cool down
clickhouse-client --query "SHOW PROCESSLIST"
sleep 1
else
echo "Server live check returns $?"
cat err
server_died=1
break
fi
fi
done
# wait in background to call wait in foreground and ensure that the
# process is alive, since w/o job control this is the only way to obtain
@ -262,14 +278,17 @@ quit
if [ "$server_died" == 1 ]
then
# The server has died.
if ! grep --text -ao "Received signal.*\|Logical error.*\|Assertion.*failed\|Failed assertion.*\|.*runtime error: .*\|.*is located.*\|SUMMARY: AddressSanitizer:.*\|SUMMARY: MemorySanitizer:.*\|SUMMARY: ThreadSanitizer:.*\|.*_LIBCPP_ASSERT.*" server.log > description.txt
if ! rg -E --text -o 'Received signal.*|Logical error.*|Assertion.*failed|Failed assertion.*|.*runtime error: .*|.*is located.*|(SUMMARY|ERROR): [a-zA-Z]+Sanitizer:.*|.*_LIBCPP_ASSERT.*' server.log > description.txt
then
echo "Lost connection to server. See the logs." > description.txt
fi
if grep -E --text 'Sanitizer: (out-of-memory|failed to allocate)' description.txt
IS_SANITIZED=$(clickhouse-local --query "SELECT value LIKE '%-fsanitize=%' FROM system.build_options WHERE name = 'CXX_FLAGS'")
if [ "${IS_SANITIZED}" -eq "1" ] && rg -E --text 'Sanitizer: (out-of-memory|out of memory|failed to allocate|Child process was terminated by signal 9)' description.txt
then
# OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
# Why? Because sanitizers have memory overhead that is not controllable from inside clickhouse-server.
task_exit_code=0
echo "success" > status.txt
else
@ -299,8 +318,8 @@ quit
# which is confusing.
task_exit_code=$fuzzer_exit_code
echo "failure" > status.txt
{ grep --text -o "Found error:.*" fuzzer.log \
|| grep --text -ao "Exception:.*" fuzzer.log \
{ rg --text -o "Found error:.*" fuzzer.log \
|| rg --text -ao "Exception:.*" fuzzer.log \
|| echo "Fuzzer failed ($fuzzer_exit_code). See the logs." ; } \
| tail -1 > description.txt
fi
@ -310,7 +329,7 @@ quit
mv core.*.gz core.gz
fi
dmesg -T | grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' && echo "OOM in dmesg" ||:
dmesg -T | rg -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' && echo "OOM in dmesg" ||:
}
case "$stage" in
@ -348,7 +367,7 @@ if [ -f core.gz ]; then
CORE_LINK='<a href="core.gz">core.gz</a>'
fi
grep --text -F '<Fatal>' server.log > fatal.log ||:
rg --text -F '<Fatal>' server.log > fatal.log ||:
pigz server.log


@ -49,7 +49,7 @@ RUN arch=${TARGETARCH:-amd64} \
&& curl -o mysql-odbc.rpm "https://cdn.mysql.com/archives/mysql-connector-odbc-8.0/mysql-connector-odbc-8.0.27-1.el8.${rarch}.rpm" \
&& rpm2archive mysql-odbc.rpm \
&& tar xf mysql-odbc.rpm.tgz -C / ./usr/lib64/ \
&& LINK_DIR=$(dpkg -L libodbc1 | grep '^/usr/lib/.*-linux-gnu/odbc$') \
&& LINK_DIR=$(dpkg -L libodbc1 | rg '^/usr/lib/.*-linux-gnu/odbc$') \
&& ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR" \
&& ln -s /usr/lib64/libmyodbc8a.so "$LINK_DIR"/libmyodbc.so


@ -8,6 +8,7 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
RUN apt-get update \
&& env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
adduser \
ca-certificates \
bash \
btrfs-progs \


@ -11,7 +11,7 @@ set -eu
for module; do
if [ "${module#-}" = "$module" ]; then
ip link show "$module" || true
lsmod | grep "$module" || true
lsmod | rg "$module" || true
fi
done


@ -37,6 +37,8 @@ RUN apt-get update \
wget \
rustc \
cargo \
ripgrep \
zstd \
&& pip3 --no-cache-dir install 'clickhouse-driver==0.2.1' scipy \
&& apt-get purge --yes python3-dev g++ \
&& apt-get autoremove --yes \


@ -193,7 +193,7 @@ function run_tests
then
# Run only explicitly specified tests, if any.
# shellcheck disable=SC2010
test_files=($(ls "$test_prefix" | grep "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}"))
test_files=($(ls "$test_prefix" | rg "$CHPC_TEST_GREP" | xargs -I{} -n1 readlink -f "$test_prefix/{}"))
elif [ "$PR_TO_TEST" -ne 0 ] \
&& [ "$(wc -l < changed-test-definitions.txt)" -gt 0 ] \
&& [ "$(wc -l < other-changed-files.txt)" -eq 0 ]
@ -210,7 +210,7 @@ function run_tests
# We can filter out certain tests
if [ -v CHPC_TEST_GREP_EXCLUDE ]; then
# filter tests array in bash https://stackoverflow.com/a/40375567
filtered_test_files=( $( for i in ${test_files[@]} ; do echo $i ; done | grep -v ${CHPC_TEST_GREP_EXCLUDE} ) )
filtered_test_files=( $( for i in ${test_files[@]} ; do echo $i ; done | rg -v ${CHPC_TEST_GREP_EXCLUDE} ) )
test_files=("${filtered_test_files[@]}")
fi
@ -284,7 +284,7 @@ function run_tests
# Use awk because bash doesn't support floating point arithmetic.
profile_seconds=$(awk "BEGIN { print ($profile_seconds_left > 0 ? 10 : 0) }")
if [ "$(grep -c $(basename $test) changed-test-definitions.txt)" -gt 0 ]
if [ "$(rg -c $(basename $test) changed-test-definitions.txt)" -gt 0 ]
then
# Run all queries from changed test files to ensure that all new queries will be tested.
max_queries=0
@ -518,7 +518,7 @@ IFS=$'\n'
for prefix in $(cut -f1,2 "analyze/query-run-metrics-for-stats.tsv" | sort | uniq)
do
file="analyze/tmp/${prefix// /_}.tsv"
grep "^$prefix " "analyze/query-run-metrics-for-stats.tsv" > "$file" &
rg "^$prefix " "analyze/query-run-metrics-for-stats.tsv" > "$file" &
printf "%s\0\n" \
"clickhouse-local \
--file \"$file\" \
@ -1088,7 +1088,7 @@ do
# Build separate .svg flamegraph for each query.
# -F is somewhat unsafe because it might match not the beginning of the
# string, but this is unlikely and escaping the query for grep is a pain.
grep -F "$query " "report/stacks.$version.tsv" \
rg -F "$query " "report/stacks.$version.tsv" \
| cut -f 5- \
| sed 's/\t/ /g' \
| tee "report/tmp/$query_file.stacks.$version.tsv" \
@ -1117,7 +1117,7 @@ do
query_file=$(echo "$query" | cut -c-120 | sed 's/[/ ]/_/g')
# Ditto the above comment about -F.
grep -F "$query " "report/metric-deviation.$version.tsv" \
rg -F "$query " "report/metric-deviation.$version.tsv" \
| cut -f4- > "$query_file.$version.metrics.rep" &
done
done
@ -1132,8 +1132,8 @@ do
{
# The second grep is a heuristic for error messages like
# "socket.timeout: timed out".
grep -h -m2 -i '\(Exception\|Error\):[^:]' "$log" \
|| grep -h -m2 -i '^[^ ]\+: ' "$log" \
rg --no-filename --max-count=2 -i '\(Exception\|Error\):[^:]' "$log" \
|| rg --no-filename --max-count=2 -i '^[^ ]\+: ' "$log" \
|| head -2 "$log"
} | sed "s/^/$test\t/" >> run-errors.tsv ||:
done
@ -1180,7 +1180,7 @@ IFS=$'\n'
for prefix in $(cut -f1 "metrics/metrics.tsv" | sort | uniq)
do
file="metrics/$prefix.tsv"
grep "^$prefix " "metrics/metrics.tsv" | cut -f2- > "$file"
rg "^$prefix " "metrics/metrics.tsv" | cut -f2- > "$file"
gnuplot -e "
set datafile separator '\t';


@ -3,7 +3,7 @@
set -e -x
# Choose random timezone for this test run
TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Choosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
@ -152,7 +152,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz ||:
# FIXME: remove once only github actions will be left
@ -163,8 +163,8 @@ if [[ -n "$WITH_COVERAGE" ]] && [[ "$WITH_COVERAGE" -eq 1 ]]; then
tar -chf /test_output/clickhouse_coverage.tar.gz /profraw ||:
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
# FIXME: remove once only github actions will be left


@ -4,7 +4,7 @@
set -e -x -a
# Choose random timezone for this test run.
TZ="$(grep -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Choosen random timezone $TZ"
ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
@ -167,7 +167,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.gz &
# Compress tables.
@ -213,8 +213,8 @@ fi
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
grep -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server1.log ||:
rg -Fa "Fatal" /var/log/clickhouse-server/clickhouse-server2.log ||:
pigz < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.gz ||:
pigz < /var/log/clickhouse-server/clickhouse-server2.log > /test_output/clickhouse-server2.log.gz ||:
# FIXME: remove once only github actions will be left


@ -77,11 +77,12 @@ EOL
local max_users_mem
max_users_mem=$((total_mem*30/100)) # 30%
echo "Setting max_memory_usage_for_user=$max_users_mem"
echo "Setting max_memory_usage_for_user=$max_users_mem and max_memory_usage for queries to 10G"
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
<clickhouse>
<profiles>
<default>
<max_memory_usage>10G</max_memory_usage>
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
</default>
</profiles>
@ -159,7 +160,7 @@ function start()
echo -e "Cannot start clickhouse-server\tFAIL" >> /test_output/test_results.tsv
cat /var/log/clickhouse-server/stdout.log
tail -n1000 /var/log/clickhouse-server/stderr.log
tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | grep -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n1000
tail -n100000 /var/log/clickhouse-server/clickhouse-server.log | rg -F -v -e '<Warning> RaftInstance:' -e '<Information> RaftInstance' | tail -n1000
break
fi
# use root to match with current uid
@ -302,7 +303,7 @@ start
clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|| (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
&& rg -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
stop
@ -312,8 +313,8 @@ stop
# Grep logs for sanitizer asserts, crashes and other critical errors
# Sanitizer asserts
grep -Fa "==================" /var/log/clickhouse-server/stderr.log | grep -v "in query:" >> /test_output/tmp
grep -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
rg -Fa "==================" /var/log/clickhouse-server/stderr.log | rg -v "in query:" >> /test_output/tmp
rg -Fa "WARNING" /var/log/clickhouse-server/stderr.log >> /test_output/tmp
zgrep -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
&& echo -e 'Sanitizer assert (in stderr.log)\tFAIL' >> /test_output/test_results.tsv \
|| echo -e 'No sanitizer asserts\tOK' >> /test_output/test_results.tsv
@ -363,7 +364,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
echo -e "Backward compatibility check\n"
echo "Get previous release tag"
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
previous_release_tag=$(clickhouse-client --version | rg -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
echo $previous_release_tag
echo "Clone previous release repository"
@ -476,7 +477,7 @@ if [ "$DISABLE_BC_CHECK" -ne "1" ]; then
start 500
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|| (echo -e 'Backward compatibility check: Server failed to start\tFAIL' >> /test_output/test_results.tsv \
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
&& rg -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log >> /test_output/bc_check_application_errors.txt)
clickhouse-client --query="SELECT 'Server version: ', version()"


@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v22.12.2.25-stable (c790cfd4465) FIXME as compared to v22.12.1.1752-stable (688e488e930)
#### Build/Testing/Packaging Improvement
* Backported in [#44381](https://github.com/ClickHouse/ClickHouse/issues/44381): In rare cases, we don't rebuild binaries, because another task with a similar prefix succeeded. E.g. `binary_darwin` didn't restart because `binary_darwin_aarch64` succeeded. [#44311](https://github.com/ClickHouse/ClickHouse/pull/44311) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#44561](https://github.com/ClickHouse/ClickHouse/issues/44561): Retry the integration tests on compressing errors. [#44529](https://github.com/ClickHouse/ClickHouse/pull/44529) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#44739](https://github.com/ClickHouse/ClickHouse/issues/44739): [#40651](https://github.com/ClickHouse/ClickHouse/issues/40651) [#41404](https://github.com/ClickHouse/ClickHouse/issues/41404). [#42126](https://github.com/ClickHouse/ClickHouse/pull/42126) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#44764](https://github.com/ClickHouse/ClickHouse/issues/44764): Fix parsing of bad version from compatibility setting. [#44224](https://github.com/ClickHouse/ClickHouse/pull/44224) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#44435](https://github.com/ClickHouse/ClickHouse/issues/44435): Fix possible crash in case function `IN` with constant arguments was used as a constant argument together with `LowCardinality`. Fixes [#44221](https://github.com/ClickHouse/ClickHouse/issues/44221). [#44346](https://github.com/ClickHouse/ClickHouse/pull/44346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Bump libdivide (to gain some new optimizations) [#44132](https://github.com/ClickHouse/ClickHouse/pull/44132) ([Azat Khuzhin](https://github.com/azat)).
* Add some settings under `compatibility` [#44209](https://github.com/ClickHouse/ClickHouse/pull/44209) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlock in StorageSystemDatabases [#44272](https://github.com/ClickHouse/ClickHouse/pull/44272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of global Git object [#44273](https://github.com/ClickHouse/ClickHouse/pull/44273) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add check for submodules sanity [#44386](https://github.com/ClickHouse/ClickHouse/pull/44386) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Implement a custom central checkout action [#44399](https://github.com/ClickHouse/ClickHouse/pull/44399) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix crash on delete from materialized view [#44705](https://github.com/ClickHouse/ClickHouse/pull/44705) ([Alexander Gololobov](https://github.com/davenger)).


@ -154,10 +154,6 @@ Runs [stateful functional tests](tests.md#functional-tests). Treat them in the s
Runs [integration tests](tests.md#integration-tests).
## Testflows Check
Runs some tests using Testflows test system. See [here](https://github.com/ClickHouse/ClickHouse/tree/master/tests/testflows#running-tests-locally) how to run them locally.
## Stress Test
Runs stateless functional tests concurrently from several clients to detect
concurrency-related errors. If it fails:


@ -281,10 +281,6 @@ We also track test coverage but only for functional tests and only for clickhous
There is an automated check for flaky tests. It runs all new tests 100 times (for functional tests) or 10 times (for integration tests). If the test fails at least once, it is considered flaky.
## Testflows
[Testflows](https://testflows.com/) is an enterprise-grade open-source testing framework, which is used to test a subset of ClickHouse.
## Test Automation {#test-automation}
We run tests with [GitHub Actions](https://github.com/features/actions).


@ -19,7 +19,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
Path String,
Time DateTime,
Value <Numeric_type>,
Value Float64,
Version <Numeric_type>
...
) ENGINE = GraphiteMergeTree(config_section)
@ -37,7 +37,7 @@ A table for the Graphite data should have the following columns for the followin
- Time of measuring the metric. Data type: `DateTime`.
- Value of the metric. Data type: any numeric.
- Value of the metric. Data type: `Float64`.
- Version of the metric. Data type: any numeric (ClickHouse saves the rows with the highest version or the last written if versions are the same. Other rows are deleted during the merge of data parts).
@ -65,7 +65,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
EventDate Date,
Path String,
Time DateTime,
Value <Numeric_type>,
Value Float64,
Version <Numeric_type>
...
) ENGINE [=] GraphiteMergeTree(date-column [, sampling_expression], (primary, key), index_granularity, config_section)


@ -86,7 +86,18 @@ When creating a table, the following settings are applied:
[join_any_take_last_row](/docs/en/operations/settings/settings.md/#settings-join_any_take_last_row)
#### join_use_nulls
[persistent](/docs/en/operations/settings/settings.md/#persistent)
#### persistent
Disables persistency for the Join and [Set](/docs/en/engines/table-engines/special/set.md) table engines.
Reduces the I/O overhead. Suitable for scenarios that prioritize performance and do not require persistence.
Possible values:
- 1 — Enabled.
- 0 — Disabled.
Default value: `1`.
The `Join`-engine tables can't be used in `GLOBAL JOIN` operations.
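For illustration, a hedged sketch of a Join-engine table with persistency disabled (the table and column names are made up):

```sql
-- With persistent = 0 the join state is kept in RAM only and is lost on restart.
CREATE TABLE id_to_name (id UInt32, name String)
ENGINE = Join(ANY, LEFT, id)
SETTINGS persistent = 0;

INSERT INTO id_to_name VALUES (1, 'alice');
SELECT joinGet('id_to_name', 'name', toUInt32(1)); -- 'alice'
```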


In case of an abrupt server restart, the block of data on the disk might be lost or damaged
When creating a table, the following settings are applied:
- [persistent](../../../operations/settings/settings.md#persistent)
#### persistent
Disables persistency for the Set and [Join](/docs/en/engines/table-engines/special/join.md/#join) table engines.
Reduces the I/O overhead. Suitable for scenarios that prioritize performance and do not require persistence.
Possible values:
- 1 — Enabled.
- 0 — Disabled.
Default value: `1`.
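A similarly hedged sketch for the Set engine (names are made up); the table can then be used on the right-hand side of `IN`:

```sql
-- With persistent = 0 the set lives in RAM only and is lost on restart.
CREATE TABLE seen_ids (id UInt64) ENGINE = Set()
SETTINGS persistent = 0;

INSERT INTO seen_ids VALUES (7);
SELECT 7 IN seen_ids; -- 1
```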


@ -2441,19 +2441,6 @@ Result
└──────────────────────────┴───────┴───────────────────────────────────────────────────────┘
```
## persistent {#persistent}
Disables persistency for the [Set](../../engines/table-engines/special/set.md/#set) and [Join](../../engines/table-engines/special/join.md/#join) table engines.
Reduces the I/O overhead. Suitable for scenarios that prioritize performance and do not require persistence.
Possible values:
- 1 — Enabled.
- 0 — Disabled.
Default value: `1`.
## allow_nullable_key {#allow-nullable-key}
Allows using of the [Nullable](../../sql-reference/data-types/nullable.md/#data_type-nullable)-typed values in a sorting and a primary key for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md/#table_engines-mergetree) tables.


@ -11,6 +11,7 @@ Columns:
- `table` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `name` ([String](../../sql-reference/data-types/string.md)) — Index name.
- `type` ([String](../../sql-reference/data-types/string.md)) — Index type.
- `type_full` ([String](../../sql-reference/data-types/string.md)) — Index type expression from create statement.
- `expr` ([String](../../sql-reference/data-types/string.md)) — Expression for the index calculation.
- `granularity` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of granules in the block.
- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The size of compressed data, in bytes.
@ -30,6 +31,7 @@ database: default
table: user_actions
name: clicks_idx
type: minmax
type_full: minmax
expr: clicks
granularity: 1
data_compressed_bytes: 58
@ -42,6 +44,7 @@ database: default
table: users
name: contacts_null_idx
type: minmax
type_full: minmax
expr: assumeNotNull(contacts_null)
granularity: 1
data_compressed_bytes: 58
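For reference, a query of the kind that would produce output like the above (a sketch; the databases and tables shown are the docs' own examples):

```sql
SELECT *
FROM system.data_skipping_indices
LIMIT 2
FORMAT Vertical;
```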


@ -806,7 +806,8 @@ Aliases: `dateSub`, `DATE_SUB`.
**Arguments**
- `unit` — The type of interval to subtract. [String](../../sql-reference/data-types/string.md).
- `unit` — The type of interval to subtract. Note: The unit should be unquoted.
Possible values:
- `second`


@ -416,6 +416,7 @@ Functions:
- `dictGetDateTime`
- `dictGetUUID`
- `dictGetString`
- `dictGetIPv4`, `dictGetIPv6`
All these functions have the `OrDefault` modification. For example, `dictGetDateOrDefault`.
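A hedged example using a hypothetical dictionary `ip_dict` with an `IPv4`-typed attribute `addr`:

```sql
-- Both the dictionary and its attribute are illustrative, not part of this commit.
SELECT dictGetIPv4('ip_dict', 'addr', toUInt64(1));
-- The OrDefault variant returns the supplied default for missing keys:
SELECT dictGetIPv4OrDefault('ip_dict', 'addr', toUInt64(42), toIPv4('0.0.0.0'));
```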


@ -351,14 +351,16 @@ In all `multiSearch*` functions the number of needles should be less than 2<sup>
## match(haystack, pattern)
Checks whether the string matches the regular expression `pattern` in `re2` syntax. `Re2` has a more limited [syntax](https://github.com/google/re2/wiki/Syntax) than Perl regular expressions.
Checks whether string `haystack` matches the regular expression `pattern`. The pattern is an [re2 regular expression](https://github.com/google/re2/wiki/Syntax) which has a more limited syntax than Perl regular expressions.
Returns 0 if it does not match, or 1 if it matches.
Returns 1 in case of a match, and 0 otherwise.
Matching is based on UTF-8, e.g. `.` matches the Unicode code point `¥` which is represented in UTF-8 using two bytes. The regular expression must not contain null bytes.
If the haystack or pattern contain a sequence of bytes that are not valid UTF-8, then the behavior is undefined.
If the haystack or pattern contain a sequence of bytes that are not valid UTF-8, the behavior is undefined.
No automatic Unicode normalization is performed; if you need it, use the [normalizeUTF8*()](https://clickhouse.com/docs/en/sql-reference/functions/string-functions/) functions.
Unlike re2's default behavior, `.` matches line breaks. To disable this, prepend the pattern with `(?-s)`.
To search for substrings in a string, it is usually better to use LIKE or position, since they work much faster.
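A few hedged examples of the behaviors described above:

```sql
SELECT match('a\nb', 'a.b');      -- 1: '.' matches the line break
SELECT match('a\nb', '(?-s)a.b'); -- 0: (?-s) restores re2's default
SELECT match('abc', '^ab');       -- 1: ordinary re2 matching
```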
## multiMatchAny(haystack, \[pattern<sub>1</sub>, pattern<sub>2</sub>, …, pattern<sub>n</sub>\])


@ -48,16 +48,10 @@ sidebar_position: 10
If you look at the section where ClickHouse installation packages are published, you will see the following package types:
1. `testing`
2. `prestable`
3. `stable`
4. `lts` (long-term support)
1. `stable`
2. `lts` (long-term support)
As mentioned above, `testing` releases are useful for catching bugs early; we do not recommend them in production, since they have not yet been tested as thoroughly as the other kinds.
Pre-releases (`prestable`) are release candidates that will most likely be promoted to stable soon. You can try them in a test environment and report any bugs you find.
In production we recommend using either a stable release (`stable`) or a long-term support release (`lts`). When choosing between these two kinds of releases, keep the following in mind:
- We recommend `stable` releases by default. A new stable release ships roughly once a month, which gives access to new features. The three latest stable releases are supported, meaning bug fixes and improvements are backported to them.
- `lts` releases ship twice a year and are supported for a year after release. They are preferable in the following cases:


@ -110,9 +110,6 @@ git push
## Integration Tests {#integration-tests}
Runs [integration tests](./tests.md#integration-tests).
## Testflows Check {#testflows-check}
Runs some tests using the Testflows test system. See [here](https://github.com/ClickHouse/ClickHouse/tree/master/tests/testflows#running-tests-locally) for how to run them locally.
## Stress Test {#stress-test}
Runs stateless functional tests concurrently from several clients to detect concurrency-related errors. If it fails:
```


@ -281,10 +281,6 @@ We also track test coverage but only for functional tests and only for clickhous
There is an automated check for flaky tests. It runs all new tests 100 times (for functional tests) or 10 times (for integration tests). If the test fails at least once, it is considered flaky.
## Testflows
[Testflows](https://testflows.com/) is an enterprise-grade open-source testing framework, which is used to test a subset of ClickHouse.
## Test Automation {#test-automation}
We run tests with [GitHub Actions](https://github.com/features/actions).


@ -12,8 +12,6 @@ priority: "optional"
conflicts:
- clickhouse-server
depends:
- adduser
suggests:
- clickhouse-keeper-dbg


@ -12,8 +12,6 @@ priority: "optional"
conflicts:
- clickhouse-keeper
depends:
- adduser
replaces:
- clickhouse-server-common
- clickhouse-server-base


@ -37,7 +37,7 @@
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <TableFunctions/registerTableFunctions.h>
#include <Storages/registerStorages.h>
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>


@ -60,7 +60,7 @@
#include <Storages/System/attachInformationSchemaTables.h>
#include <Storages/Cache/ExternalDataSourceCache.h>
#include <Storages/Cache/registerRemoteFileMetadatas.h>
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <Functions/UserDefined/IUserDefinedSQLObjectsLoader.h>
#include <Functions/registerFunctions.h>


@ -116,13 +116,9 @@ public:
static DataTypePtr getKeyType(const DataTypes & types, const AggregateFunctionPtr & nested)
{
if (types.empty())
if (types.size() != 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Aggregate function {}Map requires at least one argument", nested->getName());
if (types.size() > 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Aggregate function {}Map requires only one map argument", nested->getName());
"Aggregate function {}Map requires one map argument, but {} found", nested->getName(), types.size());
const auto * map_type = checkAndGetDataType<DataTypeMap>(types[0].get());
if (!map_type)


@ -36,8 +36,8 @@ public:
AggregateFunctionOrFill(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params)
: IAggregateFunctionHelper<AggregateFunctionOrFill>{arguments, params, createResultType(nested_function_->getResultType())}
, nested_function{nested_function_}
, size_of_data {nested_function->sizeOfData()}
, inner_nullable {nested_function->getResultType()->isNullable()}
, size_of_data{nested_function->sizeOfData()}
, inner_nullable{nested_function->getResultType()->isNullable()}
{
// nothing
}


@ -428,10 +428,7 @@ public:
}
bool keepKey(const T & key) const { return static_cast<const Derived &>(*this).keepKey(key); }
String getName() const override { return getNameImpl(); }
private:
static String getNameImpl() { return Derived::getNameImpl(); }
String getName() const override { return Derived::getNameImpl(); }
};
template <typename T, bool overflow, bool tuple_argument>


@ -65,9 +65,9 @@ class IAggregateFunction : public std::enable_shared_from_this<IAggregateFunctio
{
public:
IAggregateFunction(const DataTypes & argument_types_, const Array & parameters_, const DataTypePtr & result_type_)
: result_type(result_type_)
, argument_types(argument_types_)
: argument_types(argument_types_)
, parameters(parameters_)
, result_type(result_type_)
{}
/// Get main function name.
@ -409,9 +409,9 @@ public:
#endif
protected:
DataTypePtr result_type;
DataTypes argument_types;
Array parameters;
DataTypePtr result_type;
};


@ -31,18 +31,21 @@ FunctionNode::FunctionNode(String function_name_)
children[arguments_child_index] = std::make_shared<ListNode>();
}
ColumnsWithTypeAndName FunctionNode::getArgumentTypes() const
ColumnsWithTypeAndName FunctionNode::getArgumentColumns() const
{
ColumnsWithTypeAndName argument_types;
for (const auto & arg : getArguments().getNodes())
const auto & arguments = getArguments().getNodes();
ColumnsWithTypeAndName argument_columns;
argument_columns.reserve(arguments.size());
for (const auto & arg : arguments)
{
ColumnWithTypeAndName argument;
argument.type = arg->getResultType();
if (auto * constant = arg->as<ConstantNode>())
argument.column = argument.type->createColumnConst(1, constant->getValue());
argument_types.push_back(argument);
argument_columns.push_back(std::move(argument));
}
return argument_types;
return argument_columns;
}
void FunctionNode::resolveAsFunction(FunctionBasePtr function_value)


@ -1,12 +1,14 @@
#pragma once
#include <memory>
#include <Core/IResolvedFunction.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Analyzer/ConstantValue.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/ListNode.h>
#include <Analyzer/ConstantValue.h>
#include <Common/typeid_cast.h>
#include <Core/ColumnsWithTypeAndName.h>
#include <Core/IResolvedFunction.h>
#include <Functions/IFunction.h>
namespace DB
{
@ -19,12 +21,6 @@ namespace ErrorCodes
class IFunctionOverloadResolver;
using FunctionOverloadResolverPtr = std::shared_ptr<IFunctionOverloadResolver>;
class IFunctionBase;
using FunctionBasePtr = std::shared_ptr<const IFunctionBase>;
class IAggregateFunction;
using AggregateFunctionPtr = std::shared_ptr<const IAggregateFunction>;
/** Function node represents function in query tree.
* Function syntax: function_name(parameter_1, ...)(argument_1, ...).
* If function does not have parameters its syntax is function_name(argument_1, ...).
@ -63,66 +59,36 @@ public:
explicit FunctionNode(String function_name_);
/// Get function name
const String & getFunctionName() const
{
return function_name;
}
const String & getFunctionName() const { return function_name; }
/// Get parameters
const ListNode & getParameters() const
{
return children[parameters_child_index]->as<const ListNode &>();
}
const ListNode & getParameters() const { return children[parameters_child_index]->as<const ListNode &>(); }
/// Get parameters
ListNode & getParameters()
{
return children[parameters_child_index]->as<ListNode &>();
}
ListNode & getParameters() { return children[parameters_child_index]->as<ListNode &>(); }
/// Get parameters node
const QueryTreeNodePtr & getParametersNode() const
{
return children[parameters_child_index];
}
const QueryTreeNodePtr & getParametersNode() const { return children[parameters_child_index]; }
/// Get parameters node
QueryTreeNodePtr & getParametersNode()
{
return children[parameters_child_index];
}
QueryTreeNodePtr & getParametersNode() { return children[parameters_child_index]; }
/// Get arguments
const ListNode & getArguments() const
{
return children[arguments_child_index]->as<const ListNode &>();
}
const ListNode & getArguments() const { return children[arguments_child_index]->as<const ListNode &>(); }
/// Get arguments
ListNode & getArguments()
{
return children[arguments_child_index]->as<ListNode &>();
}
ListNode & getArguments() { return children[arguments_child_index]->as<ListNode &>(); }
/// Get arguments node
const QueryTreeNodePtr & getArgumentsNode() const
{
return children[arguments_child_index];
}
const QueryTreeNodePtr & getArgumentsNode() const { return children[arguments_child_index]; }
/// Get arguments node
QueryTreeNodePtr & getArgumentsNode()
{
return children[arguments_child_index];
}
QueryTreeNodePtr & getArgumentsNode() { return children[arguments_child_index]; }
ColumnsWithTypeAndName getArgumentTypes() const;
ColumnsWithTypeAndName getArgumentColumns() const;
/// Returns true if function node has window, false otherwise
bool hasWindow() const
{
return children[window_child_index] != nullptr;
}
bool hasWindow() const { return children[window_child_index] != nullptr; }
/** Get window node.
* Valid only for window function node.
@ -130,18 +96,12 @@ public:
* 1. It can be identifier node if window function is defined as expr OVER window_name.
* 2. It can be window node if window function is defined as expr OVER (window_name ...).
*/
const QueryTreeNodePtr & getWindowNode() const
{
return children[window_child_index];
}
const QueryTreeNodePtr & getWindowNode() const { return children[window_child_index]; }
/** Get window node.
* Valid only for window function node.
*/
QueryTreeNodePtr & getWindowNode()
{
return children[window_child_index];
}
QueryTreeNodePtr & getWindowNode() { return children[window_child_index]; }
/** Get non aggregate function.
* If function is not resolved nullptr returned.
@ -150,7 +110,7 @@ public:
{
if (kind != FunctionKind::ORDINARY)
return {};
return std::reinterpret_pointer_cast<const IFunctionBase>(function);
return std::static_pointer_cast<const IFunctionBase>(function);
}
/** Get aggregate function.
@ -161,32 +121,20 @@ public:
{
if (kind == FunctionKind::UNKNOWN || kind == FunctionKind::ORDINARY)
return {};
return std::reinterpret_pointer_cast<const IAggregateFunction>(function);
return std::static_pointer_cast<const IAggregateFunction>(function);
}
/// Is function node resolved
bool isResolved() const
{
return function != nullptr;
}
bool isResolved() const { return function != nullptr; }
/// Is function node window function
bool isWindowFunction() const
{
return hasWindow();
}
bool isWindowFunction() const { return hasWindow(); }
/// Is function node aggregate function
bool isAggregateFunction() const
{
return kind == FunctionKind::AGGREGATE;
}
bool isAggregateFunction() const { return kind == FunctionKind::AGGREGATE; }
/// Is function node ordinary function
bool isOrdinaryFunction() const
{
return kind == FunctionKind::ORDINARY;
}
bool isOrdinaryFunction() const { return kind == FunctionKind::ORDINARY; }
/** Resolve function node as non aggregate function.
* It is important that function name is updated with resolved function name.
@ -208,10 +156,7 @@ public:
*/
void resolveAsWindowFunction(AggregateFunctionPtr window_function_value);
QueryTreeNodeType getNodeType() const override
{
return QueryTreeNodeType::FUNCTION;
}
QueryTreeNodeType getNodeType() const override { return QueryTreeNodeType::FUNCTION; }
DataTypePtr getResultType() const override
{


@ -155,7 +155,7 @@ public:
inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, context);
function_node.resolveAsFunction(function->build(function_node.getArgumentTypes()));
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}
private:


@ -193,7 +193,7 @@ private:
inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, context);
function_node.resolveAsFunction(function->build(function_node.getArgumentTypes()));
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}
ContextPtr & context;


@ -65,7 +65,7 @@ QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String
auto function = FunctionFactory::instance().get(name, context);
function_node->getArguments().getNodes() = std::move(arguments);
function_node->resolveAsFunction(function->build(function_node->getArgumentTypes()));
function_node->resolveAsFunction(function->build(function_node->getArgumentColumns()));
return function_node;
}
@ -88,7 +88,7 @@ FunctionNodePtr createResolvedAggregateFunction(const String & name, const Query
{ argument->getResultType() },
parameters,
properties);
function_node->resolveAsAggregateFunction(aggregate_function);
function_node->resolveAsAggregateFunction(std::move(aggregate_function));
return function_node;
}


@ -56,7 +56,7 @@ public:
auto multi_if_function = std::make_shared<FunctionNode>("multiIf");
multi_if_function->getArguments().getNodes() = std::move(multi_if_arguments);
multi_if_function->resolveAsFunction(multi_if_function_ptr->build(multi_if_function->getArgumentTypes()));
multi_if_function->resolveAsFunction(multi_if_function_ptr->build(multi_if_function->getArgumentColumns()));
node = std::move(multi_if_function);
}

View File

@ -52,7 +52,7 @@ QueryTreeNodePtr createCastFunction(QueryTreeNodePtr from, DataTypePtr result_ty
auto function_node = std::make_shared<FunctionNode>("_CAST");
function_node->getArguments().getNodes() = std::move(arguments);
function_node->resolveAsFunction(cast_function->build(function_node->getArgumentTypes()));
function_node->resolveAsFunction(cast_function->build(function_node->getArgumentColumns()));
return function_node;
}
@ -71,7 +71,7 @@ void changeIfArguments(
auto if_resolver = FunctionFactory::instance().get("if", context);
if_node.resolveAsFunction(if_resolver->build(if_node.getArgumentTypes()));
if_node.resolveAsFunction(if_resolver->build(if_node.getArgumentColumns()));
}
/// transform(value, array_from, array_to, default_value) will be transformed to transform(value, array_from, _CAST(array_to, Array(Enum...)), _CAST(default_value, Enum...))
@ -93,7 +93,7 @@ void changeTransformArguments(
auto transform_resolver = FunctionFactory::instance().get("transform", context);
transform_node.resolveAsFunction(transform_resolver->build(transform_node.getArgumentTypes()));
transform_node.resolveAsFunction(transform_resolver->build(transform_node.getArgumentColumns()));
}
void wrapIntoToString(FunctionNode & function_node, QueryTreeNodePtr arg, ContextPtr context)
@ -102,7 +102,7 @@ void wrapIntoToString(FunctionNode & function_node, QueryTreeNodePtr arg, Contex
QueryTreeNodes arguments{ std::move(arg) };
function_node.getArguments().getNodes() = std::move(arguments);
function_node.resolveAsFunction(to_string_function->build(function_node.getArgumentTypes()));
function_node.resolveAsFunction(to_string_function->build(function_node.getArgumentColumns()));
assert(isString(function_node.getResultType()));
}

View File

@ -27,7 +27,7 @@ public:
return;
auto result_type = function_node->getResultType();
function_node->resolveAsFunction(if_function_ptr->build(function_node->getArgumentTypes()));
function_node->resolveAsFunction(if_function_ptr->build(function_node->getArgumentColumns()));
}
private:

View File

@ -4333,7 +4333,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
? AggregateFunctionFactory::instance().get(function_name + "OrNull", argument_types, parameters, properties)
: AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
function_node.resolveAsWindowFunction(aggregate_function);
function_node.resolveAsWindowFunction(std::move(aggregate_function));
bool window_node_is_identifier = function_node.getWindowNode()->getNodeType() == QueryTreeNodeType::IDENTIFIER;
ProjectionName window_projection_name = resolveWindow(function_node.getWindowNode(), scope);
@ -4396,7 +4396,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
auto aggregate_function = need_add_or_null
? AggregateFunctionFactory::instance().get(function_name + "OrNull", argument_types, parameters, properties)
: AggregateFunctionFactory::instance().get(function_name, argument_types, parameters, properties);
function_node.resolveAsAggregateFunction(aggregate_function);
function_node.resolveAsAggregateFunction(std::move(aggregate_function));
return result_projection_names;
}

View File

@ -122,7 +122,7 @@ public:
auto & not_function_arguments = not_function->getArguments().getNodes();
not_function_arguments.push_back(std::move(nested_if_function_arguments_nodes[0]));
not_function->resolveAsFunction(FunctionFactory::instance().get("not", context)->build(not_function->getArgumentTypes()));
not_function->resolveAsFunction(FunctionFactory::instance().get("not", context)->build(not_function->getArgumentColumns()));
function_node_arguments_nodes[0] = std::move(not_function);
function_node_arguments_nodes.resize(1);

View File

@ -75,7 +75,6 @@ public:
function_node->getAggregateFunction()->getParameters(),
properties);
auto function_result_type = function_node->getResultType();
function_node->resolveAsAggregateFunction(std::move(aggregate_function));
}
};

View File

@ -59,7 +59,7 @@ class ValidationChecker : public InDepthQueryTreeVisitor<ValidationChecker>
if (!function->isResolved())
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Function {} is not resolved after running {} pass",
function->dumpTree(), pass_name);
function->toAST()->formatForErrorMessage(), pass_name);
}
public:

View File

@ -8,6 +8,7 @@
#include <IO/IOThreadPool.h>
#include <IO/ReadBufferFromS3.h>
#include <IO/WriteBufferFromS3.h>
#include <IO/HTTPHeaderEntries.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <aws/core/auth/AWSCredentials.h>
#include <aws/s3/S3Client.h>
@ -35,7 +36,7 @@ namespace
auto settings = context->getStorageS3Settings().getSettings(s3_uri.uri.toString());
Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key);
HeaderCollection headers;
HTTPHeaderEntries headers;
if (access_key_id.empty())
{
credentials = Aws::Auth::AWSCredentials(settings.auth_settings.access_key_id, settings.auth_settings.secret_access_key);

View File

@ -263,7 +263,7 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadBase(const Poco::URI &
0,
DBMS_DEFAULT_BUFFER_SIZE,
getContext()->getReadSettings(),
ReadWriteBufferFromHTTP::HTTPHeaderEntries{});
HTTPHeaderEntries{});
auto source = FormatFactory::instance().getInput(ExternalDictionaryLibraryBridgeHelper::DEFAULT_FORMAT, *read_buf_ptr, sample_block, getContext(), DEFAULT_BLOCK_SIZE);
source->addBuffer(std::move(read_buf_ptr));

View File

@ -103,7 +103,7 @@ if (TARGET ch_contrib::nats_io)
endif()
add_headers_and_sources(dbms Storages/MeiliSearch)
add_headers_and_sources(dbms Storages/NamedCollections)
add_headers_and_sources(dbms Common/NamedCollections)
if (TARGET ch_contrib::amqp_cpp)
add_headers_and_sources(dbms Storages/RabbitMQ)

View File

@ -355,6 +355,8 @@ size_t ColumnUnique<ColumnType>::uniqueInsert(const Field & x)
void operator() (const Int128 & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const Int256 & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const UUID & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const IPv4 & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const IPv6 & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const Float64 & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const DecimalField<Decimal32> & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }
void operator() (const DecimalField<Decimal64> & x) { res = {reinterpret_cast<const char *>(&x), sizeof(x)}; }

View File

@ -942,5 +942,7 @@ template class ColumnVector<Int256>;
template class ColumnVector<Float32>;
template class ColumnVector<Float64>;
template class ColumnVector<UUID>;
template class ColumnVector<IPv4>;
template class ColumnVector<IPv6>;
}

View File

@ -557,5 +557,7 @@ extern template class ColumnVector<Int256>;
extern template class ColumnVector<Float32>;
extern template class ColumnVector<Float64>;
extern template class ColumnVector<UUID>;
extern template class ColumnVector<IPv4>;
extern template class ColumnVector<IPv6>;
}

View File

@ -27,5 +27,7 @@ using ColumnFloat32 = ColumnVector<Float32>;
using ColumnFloat64 = ColumnVector<Float64>;
using ColumnUUID = ColumnVector<UUID>;
using ColumnIPv4 = ColumnVector<IPv4>;
using ColumnIPv6 = ColumnVector<IPv6>;
}

View File

@ -72,6 +72,8 @@ INSTANTIATE(Decimal256)
INSTANTIATE(DateTime64)
INSTANTIATE(char *)
INSTANTIATE(UUID)
INSTANTIATE(IPv4)
INSTANTIATE(IPv6)
#undef INSTANTIATE

View File

@ -1,4 +1,5 @@
#include <limits>
#include <type_traits>
#include <typeinfo>
#include <vector>
#include <Columns/ColumnsNumber.h>
@ -14,6 +15,12 @@ static constexpr size_t MAX_ROWS = 10000;
static const std::vector<size_t> filter_ratios = {1, 2, 5, 11, 32, 64, 100, 1000};
static const size_t K = filter_ratios.size();
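/// Detection idiom: HasUnderlyingType<T>::value is true iff T::UnderlyingType exists
/// (strong typedefs such as UUID, IPv4 and IPv6 define it; plain arithmetic types do not).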
template <typename, typename = void >
struct HasUnderlyingType : std::false_type {};
template <typename T>
struct HasUnderlyingType<T, std::void_t<typename T::UnderlyingType>> : std::true_type {};
template <typename T>
static MutableColumnPtr createColumn(size_t n)
{
@ -21,6 +28,9 @@ static MutableColumnPtr createColumn(size_t n)
auto & values = column->getData();
for (size_t i = 0; i < n; ++i)
if constexpr (HasUnderlyingType<T>::value)
values.push_back(static_cast<typename T::UnderlyingType>(i));
else
values.push_back(static_cast<T>(i));
return column;
@ -85,6 +95,8 @@ TEST(ColumnVector, Filter)
testFilter<Float32>();
testFilter<Float64>();
testFilter<UUID>();
testFilter<IPv4>();
testFilter<IPv6>();
}
template <typename T>

View File

@ -0,0 +1,40 @@
#pragma once
#include <Common/ProfileEvents.h>
#include <Common/Stopwatch.h>
namespace DB
{
enum Time
{
Nanoseconds,
Microseconds,
Milliseconds,
Seconds,
};
template <Time time>
struct ProfileEventTimeIncrement
{
explicit ProfileEventTimeIncrement<time>(ProfileEvents::Event event_)
: event(event_), watch(CLOCK_MONOTONIC) {}
~ProfileEventTimeIncrement()
{
watch.stop();
if constexpr (time == Time::Nanoseconds)
ProfileEvents::increment(event, watch.elapsedNanoseconds());
else if constexpr (time == Time::Microseconds)
ProfileEvents::increment(event, watch.elapsedMicroseconds());
else if constexpr (time == Time::Milliseconds)
ProfileEvents::increment(event, watch.elapsedMilliseconds());
else if constexpr (time == Time::Seconds)
ProfileEvents::increment(event, watch.elapsedSeconds());
}
ProfileEvents::Event event;
Stopwatch watch;
};
}
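/// A minimal usage sketch of the RAII helper above; `AsynchronousReadWaitMicroseconds` is one of
/// the ProfileEvents counters declared elsewhere in this changeset:
namespace ProfileEvents { extern const Event AsynchronousReadWaitMicroseconds; }
inline void waitForReadExample()
{
    DB::ProfileEventTimeIncrement<DB::Time::Microseconds> increment(ProfileEvents::AsynchronousReadWaitMicroseconds);
    /// ... blocking wait goes here; the elapsed time is added to the counter when `increment` is destroyed.
}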

View File

@ -643,6 +643,8 @@
M(672, INVALID_SCHEDULER_NODE) \
M(673, RESOURCE_ACCESS_DENIED) \
M(674, RESOURCE_NOT_FOUND) \
M(675, CANNOT_PARSE_IPV4) \
M(676, CANNOT_PARSE_IPV6) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \

View File

@ -55,6 +55,8 @@ public:
T operator() (const Int64 & x) const { return T(x); }
T operator() (const Int128 & x) const { return T(x); }
T operator() (const UUID & x) const { return T(x.toUnderType()); }
T operator() (const IPv4 & x) const { return T(x.toUnderType()); }
T operator() (const IPv6 & x) const { return T(x.toUnderType()); }
T operator() (const Float64 & x) const
{

View File

@ -37,6 +37,8 @@ String FieldVisitorDump::operator() (const UInt256 & x) const { return formatQuo
String FieldVisitorDump::operator() (const Int128 & x) const { return formatQuotedWithPrefix(x, "Int128_"); }
String FieldVisitorDump::operator() (const Int256 & x) const { return formatQuotedWithPrefix(x, "Int256_"); }
String FieldVisitorDump::operator() (const UUID & x) const { return formatQuotedWithPrefix(x, "UUID_"); }
String FieldVisitorDump::operator() (const IPv4 & x) const { return formatQuotedWithPrefix(x, "IPv4_"); }
String FieldVisitorDump::operator() (const IPv6 & x) const { return formatQuotedWithPrefix(x, "IPv6_"); }
String FieldVisitorDump::operator() (const bool & x) const { return formatQuotedWithPrefix(x, "Bool_"); }

View File

@ -17,6 +17,8 @@ public:
String operator() (const Int128 & x) const;
String operator() (const Int256 & x) const;
String operator() (const UUID & x) const;
String operator() (const IPv4 & x) const;
String operator() (const IPv6 & x) const;
String operator() (const Float64 & x) const;
String operator() (const String & x) const;
String operator() (const Array & x) const;

View File

@ -49,6 +49,20 @@ void FieldVisitorHash::operator() (const UUID & x) const
hash.update(x);
}
void FieldVisitorHash::operator() (const IPv4 & x) const
{
UInt8 type = Field::Types::IPv4;
hash.update(type);
hash.update(x);
}
void FieldVisitorHash::operator() (const IPv6 & x) const
{
UInt8 type = Field::Types::IPv6;
hash.update(type);
hash.update(x);
}
void FieldVisitorHash::operator() (const Float64 & x) const
{
UInt8 type = Field::Types::Float64;

View File

@ -23,6 +23,8 @@ public:
void operator() (const Int128 & x) const;
void operator() (const Int256 & x) const;
void operator() (const UUID & x) const;
void operator() (const IPv4 & x) const;
void operator() (const IPv6 & x) const;
void operator() (const Float64 & x) const;
void operator() (const String & x) const;
void operator() (const Array & x) const;

View File

@ -33,6 +33,8 @@ bool FieldVisitorSum::operator() (Tuple &) const { throw Exception("Cannot sum T
bool FieldVisitorSum::operator() (Map &) const { throw Exception("Cannot sum Maps", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (Object &) const { throw Exception("Cannot sum Objects", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (UUID &) const { throw Exception("Cannot sum UUIDs", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (IPv4 &) const { throw Exception("Cannot sum IPv4s", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (IPv6 &) const { throw Exception("Cannot sum IPv6s", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (AggregateFunctionStateData &) const
{

View File

@ -28,6 +28,8 @@ public:
bool operator() (Map &) const;
bool operator() (Object &) const;
bool operator() (UUID &) const;
bool operator() (IPv4 &) const;
bool operator() (IPv6 &) const;
bool operator() (AggregateFunctionStateData &) const;
bool operator() (bool &) const;

View File

@ -65,6 +65,8 @@ String FieldVisitorToString::operator() (const UInt128 & x) const { return forma
String FieldVisitorToString::operator() (const UInt256 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const Int256 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const UUID & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const IPv4 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const IPv6 & x) const { return formatQuoted(x); }
String FieldVisitorToString::operator() (const AggregateFunctionStateData & x) const { return formatQuoted(x.data); }
String FieldVisitorToString::operator() (const bool & x) const { return x ? "true" : "false"; }

View File

@ -17,6 +17,8 @@ public:
String operator() (const Int128 & x) const;
String operator() (const Int256 & x) const;
String operator() (const UUID & x) const;
String operator() (const IPv4 & x) const;
String operator() (const IPv6 & x) const;
String operator() (const Float64 & x) const;
String operator() (const String & x) const;
String operator() (const Array & x) const;

View File

@ -16,6 +16,8 @@ void FieldVisitorWriteBinary::operator() (const Int128 & x, WriteBuffer & buf) c
void FieldVisitorWriteBinary::operator() (const UInt256 & x, WriteBuffer & buf) const { writeBinary(x, buf); }
void FieldVisitorWriteBinary::operator() (const Int256 & x, WriteBuffer & buf) const { writeBinary(x, buf); }
void FieldVisitorWriteBinary::operator() (const UUID & x, WriteBuffer & buf) const { writeBinary(x, buf); }
void FieldVisitorWriteBinary::operator() (const IPv4 & x, WriteBuffer & buf) const { writeBinary(x, buf); }
void FieldVisitorWriteBinary::operator() (const IPv6 & x, WriteBuffer & buf) const { writeBinary(x, buf); }
void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const { writeBinary(x.getValue(), buf); }
void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const { writeBinary(x.getValue(), buf); }
void FieldVisitorWriteBinary::operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const { writeBinary(x.getValue(), buf); }

View File

@ -16,6 +16,8 @@ public:
void operator() (const Int128 & x, WriteBuffer & buf) const;
void operator() (const Int256 & x, WriteBuffer & buf) const;
void operator() (const UUID & x, WriteBuffer & buf) const;
void operator() (const IPv4 & x, WriteBuffer & buf) const;
void operator() (const IPv6 & x, WriteBuffer & buf) const;
void operator() (const Float64 & x, WriteBuffer & buf) const;
void operator() (const String & x, WriteBuffer & buf) const;
void operator() (const Array & x, WriteBuffer & buf) const;

View File

@ -259,7 +259,7 @@ inline size_t DefaultHash64(T key)
static_cast<UInt64>(key) ^
static_cast<UInt64>(key >> 64));
}
else if constexpr (std::is_same_v<T, DB::UUID>)
else if constexpr (std::is_same_v<T, DB::UUID> || std::is_same_v<T, DB::IPv6>)
{
return intHash64(
static_cast<UInt64>(key.toUnderType()) ^

View File

@ -1,4 +1,4 @@
#include <Storages/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Poco/Util/XMLConfiguration.h>
#include <Common/Exception.h>
#include <Common/SettingsChanges.h>
@ -16,6 +16,13 @@ namespace ErrorCodes
namespace NamedCollectionConfiguration
{
bool hasConfigValue(
const Poco::Util::AbstractConfiguration & config,
const std::string & path)
{
return config.has(path);
}
template <typename T> T getConfigValue(
const Poco::Util::AbstractConfiguration & config,
const std::string & path)
@ -145,6 +152,52 @@ ConfigurationPtr createConfiguration(const std::string & root_name, const Settin
return config;
}
void listKeys(
const Poco::Util::AbstractConfiguration & config,
std::queue<std::string> enumerate_paths,
std::set<std::string, std::less<>> & result,
ssize_t depth)
{
if (enumerate_paths.empty())
enumerate_paths.push("");
const bool do_finish = depth >= 0 && --depth < 0;
auto initial_paths = std::move(enumerate_paths);
enumerate_paths = {};
while (!initial_paths.empty())
{
auto path = initial_paths.front();
initial_paths.pop();
Poco::Util::AbstractConfiguration::Keys keys;
if (path.empty())
config.keys(keys);
else
config.keys(path, keys);
if (keys.empty())
{
result.insert(path);
}
else if (do_finish)
{
for (const auto & key : keys)
result.emplace(path.empty() ? key : path + '.' + key);
}
else
{
for (const auto & key : keys)
enumerate_paths.emplace(path.empty() ? key : path + '.' + key);
}
}
if (enumerate_paths.empty())
return;
listKeys(config, enumerate_paths, result, depth);
}
template String getConfigValue<String>(const Poco::Util::AbstractConfiguration & config,
const std::string & path);
template UInt64 getConfigValue<UInt64>(const Poco::Util::AbstractConfiguration & config,

View File

@ -1,5 +1,7 @@
#pragma once
#include <Poco/Util/AbstractConfiguration.h>
#include <queue>
#include <set>
namespace DB
{
@ -12,6 +14,10 @@ namespace NamedCollectionConfiguration
ConfigurationPtr createEmptyConfiguration(const std::string & root_name);
bool hasConfigValue(
const Poco::Util::AbstractConfiguration & config,
const std::string & path);
template <typename T> T getConfigValue(
const Poco::Util::AbstractConfiguration & config,
const std::string & path);
@ -39,6 +45,28 @@ void removeConfigValue(
ConfigurationPtr createConfiguration(const std::string & root_name, const SettingsChanges & settings);
/// Enumerate key paths of the config recursively.
/// E.g. if `enumerate_paths` = {"root.key1"} and config like
/// <root>
/// <key0></key0>
/// <key1>
/// <key2></key2>
/// <key3>
/// <key4></key4>
/// </key3>
/// </key1>
/// </root>
/// the `result` will contain: "root.key0", "root.key1.key2" and "root.key1.key3.key4"
///
/// depth == -1 means to return all keys with full path: "root.key0", "root.key1.key2", "root.key1.key3.key4".
/// depth == 0 means: "root.key0" and "root.key1"
/// depth == 1 means: "root.key0", "root.key1.key2" and "root.key1.key3"
/// and so on.
void listKeys(
const Poco::Util::AbstractConfiguration & config,
std::queue<std::string> enumerate_paths,
std::set<std::string, std::less<>> & result,
ssize_t depth);
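/// Illustrative call against the <root> example above:
///     std::set<std::string, std::less<>> result;
///     listKeys(config, /* enumerate_paths */ {}, result, /* depth */ -1);
///     /// result == {"root.key0", "root.key1.key2", "root.key1.key3.key4"}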
}
}

View File

@ -1,4 +1,4 @@
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <Common/escapeForFileName.h>
#include <Common/FieldVisitorToString.h>
#include <Common/logger_useful.h>
@ -13,8 +13,8 @@
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Interpreters/Context.h>
#include <Storages/NamedCollections/NamedCollections.h>
#include <Storages/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollections.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
namespace fs = std::filesystem;
@ -69,10 +69,10 @@ public:
{
const auto collection_prefix = getCollectionPrefix(collection_name);
std::queue<std::string> enumerate_input;
std::set<std::string> enumerate_result;
std::set<std::string, std::less<>> enumerate_result;
enumerate_input.push(collection_prefix);
collectKeys(config, std::move(enumerate_input), enumerate_result);
NamedCollectionConfiguration::listKeys(config, std::move(enumerate_input), enumerate_result, -1);
/// Collection does not have any keys.
/// (`enumerate_result` == <collection_path>).
@ -97,50 +97,6 @@ private:
{
return fmt::format("{}.{}", NAMED_COLLECTIONS_CONFIG_PREFIX, collection_name);
}
/// Enumerate keys paths of the config recursively.
/// E.g. if `enumerate_paths` = {"root.key1"} and config like
/// <root>
/// <key0></key0>
/// <key1>
/// <key2></key2>
/// <key3>
/// <key4></key4>
/// </key3>
/// </key1>
/// </root>
/// the `result` will contain two strings: "root.key1.key2" and "root.key1.key3.key4"
static void collectKeys(
const Poco::Util::AbstractConfiguration & config,
std::queue<std::string> enumerate_paths,
std::set<std::string> & result)
{
if (enumerate_paths.empty())
return;
auto initial_paths = std::move(enumerate_paths);
enumerate_paths = {};
while (!initial_paths.empty())
{
auto path = initial_paths.front();
initial_paths.pop();
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(path, keys);
if (keys.empty())
{
result.insert(path);
}
else
{
for (const auto & key : keys)
enumerate_paths.emplace(path + '.' + key);
}
}
collectKeys(config, enumerate_paths, result);
}
};

View File

@ -3,8 +3,8 @@
#include <Interpreters/Context.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <Storages/NamedCollections/NamedCollectionConfiguration.h>
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollectionConfiguration.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <ranges>
@ -229,10 +229,30 @@ public:
assert(removed);
}
Keys getKeys() const
Keys getKeys(ssize_t depth, const std::string & prefix) const
{
std::queue<std::string> enumerate_input;
if (prefix.empty())
{
if (depth == -1)
{
/// Return all keys with full depth.
return keys;
}
}
else
{
if (!Configuration::hasConfigValue(*config, prefix))
return {};
enumerate_input.push(prefix);
}
Keys result;
Configuration::listKeys(*config, enumerate_input, result, depth);
return result;
}
Keys::const_iterator begin() const
{
@ -379,10 +399,10 @@ MutableNamedCollectionPtr NamedCollection::duplicate() const
std::move(impl), collection_name, NamedCollectionUtils::SourceId::NONE, true));
}
NamedCollection::Keys NamedCollection::getKeys() const
NamedCollection::Keys NamedCollection::getKeys(ssize_t depth, const std::string & prefix) const
{
std::lock_guard lock(mutex);
return pimpl->getKeys();
return pimpl->getKeys(depth, prefix);
}
template <bool Locked> NamedCollection::const_iterator NamedCollection::begin() const

View File

@ -1,7 +1,7 @@
#pragma once
#include <Interpreters/Context.h>
#include <Storages/NamedCollections/NamedCollections_fwd.h>
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollections_fwd.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
namespace Poco { namespace Util { class AbstractConfiguration; } }
@ -47,7 +47,7 @@ public:
MutableNamedCollectionPtr duplicate() const;
Keys getKeys() const;
Keys getKeys(ssize_t depth = -1, const std::string & prefix = "") const;
using iterator = typename Keys::iterator;
using const_iterator = typename Keys::const_iterator;

View File

@ -358,6 +358,7 @@ The server successfully detected this situation and will download merged part fr
\
M(ThreadpoolReaderTaskMicroseconds, "Time spent getting the data in asynchronous reading") \
M(ThreadpoolReaderReadBytes, "Bytes read from a threadpool task in asynchronous reading") \
M(ThreadpoolReaderSubmit, "Number of times a task was submitted to a threadpool for asynchronous reading") \
\
M(FileSegmentWaitReadBufferMicroseconds, "Metric per file segment. Time spend waiting for internal read buffer (includes cache waiting)") \
M(FileSegmentReadMicroseconds, "Metric per file segment. Time spend reading from file") \
@ -379,6 +380,7 @@ The server successfully detected this situation and will download merged part fr
M(ThreadPoolReaderPageCacheMissElapsedMicroseconds, "Time spent reading data inside the asynchronous job in ThreadPoolReader - when read was not done from page cache.") \
\
M(AsynchronousReadWaitMicroseconds, "Time spent in waiting for asynchronous reads.") \
M(AsynchronousRemoteReadWaitMicroseconds, "Time spent in waiting for asynchronous remote reads.") \
\
M(ExternalDataSourceLocalCacheReadBytes, "Bytes read from local cache buffer in RemoteReadBufferCache")\
\

View File

@ -9,36 +9,55 @@
namespace DB
{
// To be used in formatIPv4, maps a byte to its string form prefixed with length (to save a strlen call).
extern const char one_byte_to_string_lookup_table[256][4] =
/** Further we want to generate a constexpr array of strings (with their sizes) from the sequence of unsigned ints [0..N)
* in order to use this array for fast conversion of unsigned integers to strings
*/
namespace detail
{
{1, '0'}, {1, '1'}, {1, '2'}, {1, '3'}, {1, '4'}, {1, '5'}, {1, '6'}, {1, '7'}, {1, '8'}, {1, '9'},
{2, '1', '0'}, {2, '1', '1'}, {2, '1', '2'}, {2, '1', '3'}, {2, '1', '4'}, {2, '1', '5'}, {2, '1', '6'}, {2, '1', '7'}, {2, '1', '8'}, {2, '1', '9'},
{2, '2', '0'}, {2, '2', '1'}, {2, '2', '2'}, {2, '2', '3'}, {2, '2', '4'}, {2, '2', '5'}, {2, '2', '6'}, {2, '2', '7'}, {2, '2', '8'}, {2, '2', '9'},
{2, '3', '0'}, {2, '3', '1'}, {2, '3', '2'}, {2, '3', '3'}, {2, '3', '4'}, {2, '3', '5'}, {2, '3', '6'}, {2, '3', '7'}, {2, '3', '8'}, {2, '3', '9'},
{2, '4', '0'}, {2, '4', '1'}, {2, '4', '2'}, {2, '4', '3'}, {2, '4', '4'}, {2, '4', '5'}, {2, '4', '6'}, {2, '4', '7'}, {2, '4', '8'}, {2, '4', '9'},
{2, '5', '0'}, {2, '5', '1'}, {2, '5', '2'}, {2, '5', '3'}, {2, '5', '4'}, {2, '5', '5'}, {2, '5', '6'}, {2, '5', '7'}, {2, '5', '8'}, {2, '5', '9'},
{2, '6', '0'}, {2, '6', '1'}, {2, '6', '2'}, {2, '6', '3'}, {2, '6', '4'}, {2, '6', '5'}, {2, '6', '6'}, {2, '6', '7'}, {2, '6', '8'}, {2, '6', '9'},
{2, '7', '0'}, {2, '7', '1'}, {2, '7', '2'}, {2, '7', '3'}, {2, '7', '4'}, {2, '7', '5'}, {2, '7', '6'}, {2, '7', '7'}, {2, '7', '8'}, {2, '7', '9'},
{2, '8', '0'}, {2, '8', '1'}, {2, '8', '2'}, {2, '8', '3'}, {2, '8', '4'}, {2, '8', '5'}, {2, '8', '6'}, {2, '8', '7'}, {2, '8', '8'}, {2, '8', '9'},
{2, '9', '0'}, {2, '9', '1'}, {2, '9', '2'}, {2, '9', '3'}, {2, '9', '4'}, {2, '9', '5'}, {2, '9', '6'}, {2, '9', '7'}, {2, '9', '8'}, {2, '9', '9'},
{3, '1', '0', '0'}, {3, '1', '0', '1'}, {3, '1', '0', '2'}, {3, '1', '0', '3'}, {3, '1', '0', '4'}, {3, '1', '0', '5'}, {3, '1', '0', '6'}, {3, '1', '0', '7'}, {3, '1', '0', '8'}, {3, '1', '0', '9'},
{3, '1', '1', '0'}, {3, '1', '1', '1'}, {3, '1', '1', '2'}, {3, '1', '1', '3'}, {3, '1', '1', '4'}, {3, '1', '1', '5'}, {3, '1', '1', '6'}, {3, '1', '1', '7'}, {3, '1', '1', '8'}, {3, '1', '1', '9'},
{3, '1', '2', '0'}, {3, '1', '2', '1'}, {3, '1', '2', '2'}, {3, '1', '2', '3'}, {3, '1', '2', '4'}, {3, '1', '2', '5'}, {3, '1', '2', '6'}, {3, '1', '2', '7'}, {3, '1', '2', '8'}, {3, '1', '2', '9'},
{3, '1', '3', '0'}, {3, '1', '3', '1'}, {3, '1', '3', '2'}, {3, '1', '3', '3'}, {3, '1', '3', '4'}, {3, '1', '3', '5'}, {3, '1', '3', '6'}, {3, '1', '3', '7'}, {3, '1', '3', '8'}, {3, '1', '3', '9'},
{3, '1', '4', '0'}, {3, '1', '4', '1'}, {3, '1', '4', '2'}, {3, '1', '4', '3'}, {3, '1', '4', '4'}, {3, '1', '4', '5'}, {3, '1', '4', '6'}, {3, '1', '4', '7'}, {3, '1', '4', '8'}, {3, '1', '4', '9'},
{3, '1', '5', '0'}, {3, '1', '5', '1'}, {3, '1', '5', '2'}, {3, '1', '5', '3'}, {3, '1', '5', '4'}, {3, '1', '5', '5'}, {3, '1', '5', '6'}, {3, '1', '5', '7'}, {3, '1', '5', '8'}, {3, '1', '5', '9'},
{3, '1', '6', '0'}, {3, '1', '6', '1'}, {3, '1', '6', '2'}, {3, '1', '6', '3'}, {3, '1', '6', '4'}, {3, '1', '6', '5'}, {3, '1', '6', '6'}, {3, '1', '6', '7'}, {3, '1', '6', '8'}, {3, '1', '6', '9'},
{3, '1', '7', '0'}, {3, '1', '7', '1'}, {3, '1', '7', '2'}, {3, '1', '7', '3'}, {3, '1', '7', '4'}, {3, '1', '7', '5'}, {3, '1', '7', '6'}, {3, '1', '7', '7'}, {3, '1', '7', '8'}, {3, '1', '7', '9'},
{3, '1', '8', '0'}, {3, '1', '8', '1'}, {3, '1', '8', '2'}, {3, '1', '8', '3'}, {3, '1', '8', '4'}, {3, '1', '8', '5'}, {3, '1', '8', '6'}, {3, '1', '8', '7'}, {3, '1', '8', '8'}, {3, '1', '8', '9'},
{3, '1', '9', '0'}, {3, '1', '9', '1'}, {3, '1', '9', '2'}, {3, '1', '9', '3'}, {3, '1', '9', '4'}, {3, '1', '9', '5'}, {3, '1', '9', '6'}, {3, '1', '9', '7'}, {3, '1', '9', '8'}, {3, '1', '9', '9'},
{3, '2', '0', '0'}, {3, '2', '0', '1'}, {3, '2', '0', '2'}, {3, '2', '0', '3'}, {3, '2', '0', '4'}, {3, '2', '0', '5'}, {3, '2', '0', '6'}, {3, '2', '0', '7'}, {3, '2', '0', '8'}, {3, '2', '0', '9'},
{3, '2', '1', '0'}, {3, '2', '1', '1'}, {3, '2', '1', '2'}, {3, '2', '1', '3'}, {3, '2', '1', '4'}, {3, '2', '1', '5'}, {3, '2', '1', '6'}, {3, '2', '1', '7'}, {3, '2', '1', '8'}, {3, '2', '1', '9'},
{3, '2', '2', '0'}, {3, '2', '2', '1'}, {3, '2', '2', '2'}, {3, '2', '2', '3'}, {3, '2', '2', '4'}, {3, '2', '2', '5'}, {3, '2', '2', '6'}, {3, '2', '2', '7'}, {3, '2', '2', '8'}, {3, '2', '2', '9'},
{3, '2', '3', '0'}, {3, '2', '3', '1'}, {3, '2', '3', '2'}, {3, '2', '3', '3'}, {3, '2', '3', '4'}, {3, '2', '3', '5'}, {3, '2', '3', '6'}, {3, '2', '3', '7'}, {3, '2', '3', '8'}, {3, '2', '3', '9'},
{3, '2', '4', '0'}, {3, '2', '4', '1'}, {3, '2', '4', '2'}, {3, '2', '4', '3'}, {3, '2', '4', '4'}, {3, '2', '4', '5'}, {3, '2', '4', '6'}, {3, '2', '4', '7'}, {3, '2', '4', '8'}, {3, '2', '4', '9'},
{3, '2', '5', '0'}, {3, '2', '5', '1'}, {3, '2', '5', '2'}, {3, '2', '5', '3'}, {3, '2', '5', '4'}, {3, '2', '5', '5'},
};
template <unsigned... digits>
struct ToChars
{
static const char value[];
static const size_t size;
};
template <unsigned... digits>
constexpr char ToChars<digits...>::value[] = {('0' + digits)..., 0};
template <unsigned... digits>
constexpr size_t ToChars<digits...>::size = sizeof...(digits);
template <unsigned rem, unsigned... digits>
struct Decompose : Decompose<rem / 10, rem % 10, digits...> {};
template <unsigned... digits>
struct Decompose<0, digits...> : ToChars<digits...> {};
template <>
struct Decompose<0> : ToChars<0> {};
template <unsigned num>
struct NumToString : Decompose<num> {};
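/// e.g. NumToString<137>::value is the null-terminated string "137" and NumToString<137>::size == 3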
template <class T, T... ints>
consteval std::array<std::pair<const char *, size_t>, sizeof...(ints)> str_make_array_impl(std::integer_sequence<T, ints...>)
{
return std::array<std::pair<const char *, size_t>, sizeof...(ints)> { std::pair<const char *, size_t> {NumToString<ints>::value, NumToString<ints>::size}... };
}
}
/** str_make_array<N>() - generates a static array of std::pair<const char *, size_t> for numbers [0..N), where:
* first - null-terminated string representing the number
* second - size of the string, as would be returned by strlen()
*/
template <size_t N>
consteval std::array<std::pair<const char *, size_t>, N> str_make_array()
{
return detail::str_make_array_impl(std::make_integer_sequence<int, N>{});
}
/// This will generate a static array of pair<const char *, size_t> for [0..255] at compile time
extern constexpr auto one_byte_to_string_lookup_table = str_make_array<256>();
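/// Illustrative lookup (this is what formatIPv4 below does): copy a byte's precomputed
/// decimal string without calling snprintf or strlen.
///     const auto & [str, len] = one_byte_to_string_lookup_table[value];
///     memcpy(dst, str, len);
///     dst += len;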
/// Integer logarithm: returns ceil(log(value, base)) (the smallest integer greater than or equal to log(value, base))
static constexpr UInt32 intLog(const UInt32 value, const UInt32 base, const bool carry)

View File

@ -3,6 +3,7 @@
#include <base/types.h>
#include <cstring>
#include <algorithm>
#include <type_traits>
#include <utility>
#include <base/range.h>
#include <base/unaligned.h>
@ -17,6 +18,7 @@ constexpr size_t IPV6_MAX_TEXT_LENGTH = 45; /// Does not count tail zero byt
namespace DB
{
extern const std::array<std::pair<const char *, size_t>, 256> one_byte_to_string_lookup_table;
/** Rewritten inet_ntop6 from http://svn.apache.org/repos/asf/apr/apr/trunk/network_io/unix/inet_pton.c
* performs significantly faster than the reference implementation due to the absence of sprintf calls,
@ -30,17 +32,38 @@ void formatIPv6(const unsigned char * src, char *& dst, uint8_t zeroed_tail_byte
* which should be long enough.
* That is "127.0.0.1" becomes 0x7f000001.
*
* In case of failure returns false and doesn't modify buffer pointed by `dst`.
* In case of failure, the buffer pointed to by `dst` is not modified.
*
* @param src - input string, expected to be non-null and null-terminated right after the IPv4 string value.
* WARNING - this function is adapted to work with ReadBuffer, where src is the position reference (ReadBuffer::position())
* and eof is the ReadBuffer::eof() - therefore the algorithm below does not rely on the buffer's continuity.
* To parse plain strings, use the overloads below.
*
* @param src - iterator (reference to pointer) over the input string - warning - continuity is not guaranteed.
* @param eof - function returning true if the iterator reached the end - warning - can break the iterator's continuity.
* @param dst - where to put output bytes, expected to be non-null and at least IPV4_BINARY_LENGTH bytes long.
* @return false if parsing failed, true otherwise.
* @param first_octet - preparsed first octet
* @return - true if parsed successfully, false otherwise.
*/
inline bool parseIPv4(const char * src, unsigned char * dst)
template <typename T, typename EOFfunction>
requires (std::is_same<typename std::remove_cv<T>::type, char>::value)
inline bool parseIPv4(T * &src, EOFfunction eof, unsigned char * dst, int32_t first_octet = -1)
{
if (src == nullptr || first_octet > 255)
return false;
UInt32 result = 0;
for (int offset = 24; offset >= 0; offset -= 8)
int offset = 24;
if (first_octet >= 0)
{
result |= first_octet << offset;
offset -= 8;
}
for (; true; offset -= 8, ++src)
{
if (eof())
return false;
UInt32 value = 0;
size_t len = 0;
while (isNumericASCII(*src) && len <= 3)
@ -48,137 +71,333 @@ inline bool parseIPv4(const char * src, unsigned char * dst)
value = value * 10 + (*src - '0');
++len;
++src;
if (eof())
break;
}
if (len == 0 || value > 255 || (offset > 0 && *src != '.'))
if (len == 0 || value > 255 || (offset > 0 && (eof() || *src != '.')))
return false;
result |= value << offset;
++src;
}
if (*(src - 1) != '\0')
return false;
if (offset == 0)
break;
}
if constexpr (std::endian::native == std::endian::little)
memcpy(dst, &result, sizeof(result));
else
reverseMemcpy(dst, &result, sizeof(result));
return true;
}
/// returns pointer to the right after parsed sequence or null on failed parsing
inline const char * parseIPv4(const char * src, const char * end, unsigned char * dst)
{
if (parseIPv4(src, [&src, end](){ return src == end; }, dst))
return src;
return nullptr;
}
/// returns true if whole buffer was parsed successfully
inline bool parseIPv4whole(const char * src, const char * end, unsigned char * dst)
{
return parseIPv4(src, end, dst) == end;
}
/// returns pointer to the right after parsed sequence or null on failed parsing
inline const char * parseIPv4(const char * src, unsigned char * dst)
{
if (parseIPv4(src, [](){ return false; }, dst))
return src;
return nullptr;
}
/// returns true if whole null-terminated string was parsed successfully
inline bool parseIPv4whole(const char * src, unsigned char * dst)
{
const char * end = parseIPv4(src, dst);
return end != nullptr && *end == '\0';
}
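/// A minimal sketch of the string-based overloads above (the helper name is illustrative):
inline bool exampleParseIPv4(unsigned char * bin /* at least IPV4_BINARY_LENGTH bytes */)
{
    /// On success `bin` holds the bytes of the UInt32 value 0x7f000001 in native byte order.
    return parseIPv4whole("127.0.0.1", bin);
}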
/** Unsafe (no bounds-checking for src nor dst), optimized version of parsing IPv6 string.
*
* Slightly altered implementation from http://svn.apache.org/repos/asf/apr/apr/trunk/network_io/unix/inet_pton.c
* Parses the input string `src` and stores binary big-endian value into buffer pointed by `dst`,
* which should be long enough. In case of failure zeroes
* IPV6_BINARY_LENGTH bytes of buffer pointed by `dst`.
* which should be long enough. In case of failure zeroes IPV6_BINARY_LENGTH bytes of buffer pointed by `dst`.
*
* @param src - input string, expected to be non-null and null-terminated right after the IPv6 string value.
* WARNING - this function is adapted to work with ReadBuffer, where src is the position reference (ReadBuffer::position())
* and eof is the ReadBuffer::eof() - therefore the algorithm below does not rely on the buffer's continuity.
* To parse plain strings, use the overloads below.
*
* @param src - iterator (reference to pointer) over the input string - warning - continuity is not guaranteed.
* @param eof - function returning true if the iterator reached the end - warning - can break the iterator's continuity.
* @param dst - where to put output bytes, expected to be non-null and at least IPV6_BINARY_LENGTH bytes long.
* @return false if parsing failed, true otherwise.
* @param first_block - preparsed first block
* @return - true if parsed successfully, false otherwise.
*/
inline bool parseIPv6(const char * src, unsigned char * dst)
template <typename T, typename EOFfunction>
requires (std::is_same<typename std::remove_cv<T>::type, char>::value)
inline bool parseIPv6(T * &src, EOFfunction eof, unsigned char * dst, int32_t first_block = -1)
{
const auto clear_dst = [dst]()
{
memset(dst, '\0', IPV6_BINARY_LENGTH);
std::memset(dst, '\0', IPV6_BINARY_LENGTH);
return false;
};
/// Leading :: requires some special handling.
if (src == nullptr || eof())
return clear_dst();
int groups = 0; /// number of parsed groups
unsigned char * iter = dst; /// iterator over dst buffer
unsigned char * zptr = nullptr; /// pointer into the dst buffer where the all-zeroes block ("::") starts
std::memset(dst, '\0', IPV6_BINARY_LENGTH);
if (first_block >= 0)
{
*iter++ = static_cast<unsigned char>((first_block >> 8) & 0xffu);
*iter++ = static_cast<unsigned char>(first_block & 0xffu);
if (*src == ':')
if (*++src != ':')
return clear_dst();
unsigned char tmp[IPV6_BINARY_LENGTH]{};
unsigned char * tp = tmp;
unsigned char * endp = tp + IPV6_BINARY_LENGTH;
const char * curtok = src;
bool saw_xdigit = false;
UInt32 val{};
unsigned char * colonp = nullptr;
/// Assuming zero-terminated string.
while (char ch = *src++)
{
UInt8 num = unhex(ch);
if (num != 0xFF)
{
val <<= 4;
val |= num;
if (val > 0xffffu)
return clear_dst();
saw_xdigit = true;
continue;
zptr = iter;
++src;
}
++groups;
}
if (ch == ':')
bool group_start = true;
while (!eof() && groups < 8)
{
curtok = src;
if (!saw_xdigit)
if (*src == ':')
{
if (colonp)
++src;
if (eof()) /// trailing colon is not allowed
return clear_dst();
colonp = tp;
group_start = true;
if (*src == ':')
{
if (zptr != nullptr) /// multiple all-zeroes blocks are not allowed
return clear_dst();
zptr = iter;
++src;
continue;
}
if (groups == 0) /// leading colon is not allowed
return clear_dst();
}
if (tp + sizeof(UInt16) > endp)
if (*src == '.') /// mixed IPv4 parsing
{
if (groups <= 1 && zptr == nullptr) /// IPv4 block can't be the first
return clear_dst();
*tp++ = static_cast<unsigned char>((val >> 8) & 0xffu);
*tp++ = static_cast<unsigned char>(val & 0xffu);
saw_xdigit = false;
val = 0;
continue;
}
++src;
if (eof())
return clear_dst();
if (ch == '.' && (tp + IPV4_BINARY_LENGTH) <= endp)
/// last parsed group should be reinterpreted as a decimal value - it's the first octet of IPv4
--groups;
iter -= 2;
UInt16 num = 0;
for (int i = 0; i < 2; ++i)
{
if (!parseIPv4(curtok, tp))
unsigned char first = (iter[i] >> 4) & 0x0fu;
unsigned char second = iter[i] & 0x0fu;
if (first > 9 || second > 9)
return clear_dst();
(num *= 100) += first * 10 + second;
}
if (num > 255)
return clear_dst();
/// parse IPv4 with known first octet
if (!parseIPv4(src, eof, iter, num))
return clear_dst();
if constexpr (std::endian::native == std::endian::little)
std::reverse(tp, tp + IPV4_BINARY_LENGTH);
std::reverse(iter, iter + IPV4_BINARY_LENGTH);
tp += IPV4_BINARY_LENGTH;
saw_xdigit = false;
break; /* '\0' was seen by ipv4_scan(). */
iter += 4;
groups += 2;
break; /// IPv4 block is the last - end of parsing
}
return clear_dst();
}
if (!group_start) /// end of parsing
break;
group_start = false;
if (saw_xdigit)
UInt16 val = 0; /// current decoded group
int xdigits = 0; /// number of decoded hex digits in current group
for (; !eof() && xdigits < 4; ++src, ++xdigits)
{
if (tp + sizeof(UInt16) > endp)
UInt8 num = unhex(*src);
if (num == 0xFF)
break;
(val <<= 4) |= num;
}
if (xdigits == 0) /// end of parsing
break;
*iter++ = static_cast<unsigned char>((val >> 8) & 0xffu);
*iter++ = static_cast<unsigned char>(val & 0xffu);
++groups;
}
/// either all 8 groups or an all-zeroes block must be present
if (groups < 8 && zptr == nullptr)
return clear_dst();
*tp++ = static_cast<unsigned char>((val >> 8) & 0xffu);
*tp++ = static_cast<unsigned char>(val & 0xffu);
}
if (colonp)
if (zptr != nullptr) /// process all-zeroes block
{
/*
* Since some memmove()'s erroneously fail to handle
* overlapping regions, we'll do the shift by hand.
*/
const auto n = tp - colonp;
for (int i = 1; i <= n; ++i)
{
endp[- i] = colonp[n - i];
colonp[n - i] = 0;
}
tp = endp;
size_t msize = iter - zptr;
std::memmove(dst + IPV6_BINARY_LENGTH - msize, zptr, msize);
std::memset(zptr, '\0', IPV6_BINARY_LENGTH - (iter - dst));
}
if (tp != endp)
return clear_dst();
memcpy(dst, tmp, sizeof(tmp));
return true;
}
/// returns pointer to the right after parsed sequence or null on failed parsing
inline const char * parseIPv6(const char * src, const char * end, unsigned char * dst)
{
if (parseIPv6(src, [&src, end](){ return src == end; }, dst))
return src;
return nullptr;
}
/// returns true if whole buffer was parsed successfully
inline bool parseIPv6whole(const char * src, const char * end, unsigned char * dst)
{
return parseIPv6(src, end, dst) == end;
}
/// returns pointer to the right after parsed sequence or null on failed parsing
inline const char * parseIPv6(const char * src, unsigned char * dst)
{
if (parseIPv6(src, [](){ return false; }, dst))
return src;
return nullptr;
}
/// returns true if whole null-terminated string was parsed successfully
inline bool parseIPv6whole(const char * src, unsigned char * dst)
{
const char * end = parseIPv6(src, dst);
return end != nullptr && *end == '\0';
}
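/// A minimal sketch of the string-based overloads above, including the mixed IPv6/IPv4 form
/// (the helper name is illustrative):
inline bool exampleParseIPv6(unsigned char * bin /* at least IPV6_BINARY_LENGTH bytes */)
{
    /// On success the last 6 bytes of `bin` are 0xff 0xff 0x01 0x02 0x03 0x04.
    return parseIPv6whole("::ffff:1.2.3.4", bin);
}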
/** Unsafe (no bounds-checking for src nor dst), optimized version of parsing IPv6 string.
*
* Parses the input string `src` IPv6 or possible IPv4 into IPv6 and stores binary big-endian value into buffer pointed by `dst`,
* which should be long enough. In case of failure zeroes IPV6_BINARY_LENGTH bytes of buffer pointed by `dst`.
*
* WARNING - this function is adapted to work with ReadBuffer, where src is the position reference (ReadBuffer::position())
* and eof is the ReadBuffer::eof() - therefore the algorithm below does not rely on the buffer's continuity.
*
* @param src - iterator (reference to pointer) over the input string - warning - continuity is not guaranteed.
* @param eof - function returning true if the iterator reached the end - warning - can break the iterator's continuity.
* @param dst - where to put output bytes, expected to be non-null and at least IPV6_BINARY_LENGTH bytes long.
* @return - true if parsed successfully, false otherwise.
*/
template <typename T, typename EOFfunction>
requires (std::is_same<typename std::remove_cv<T>::type, char>::value)
inline bool parseIPv6orIPv4(T * &src, EOFfunction eof, unsigned char * dst)
{
const auto clear_dst = [dst]()
{
std::memset(dst, '\0', IPV6_BINARY_LENGTH);
return false;
};
if (src == nullptr)
return clear_dst();
bool leading_zero = false;
uint16_t val = 0;
int digits = 0;
/// parse up to the first 4 digits as hexadecimal
for (; !eof() && digits < 4; ++src, ++digits)
{
if (*src == ':' || *src == '.')
break;
if (digits == 0 && *src == '0')
leading_zero = true;
UInt8 num = unhex(*src);
if (num == 0xFF)
return clear_dst();
(val <<= 4) |= num;
}
if (eof())
return clear_dst();
if (*src == ':') /// IPv6
{
if (digits == 0) /// leading colon - no preparsed group
return parseIPv6(src, eof, dst);
++src;
return parseIPv6(src, eof, dst, val); /// parse with first preparsed group
}
if (*src == '.') /// IPv4
{
/// should have some digits
if (digits == 0)
return clear_dst();
/// should not have leading zeroes and should have no more than 3 digits
if ((leading_zero && digits > 1) || digits > 3)
return clear_dst();
/// recode first group as decimal
UInt16 num = 0;
for (int exp = 1; exp < 1000; exp *= 10)
{
int n = val & 0x0fu;
if (n > 9)
return clear_dst();
num += n * exp;
val >>= 4;
}
if (num > 255)
return clear_dst();
++src;
if (!parseIPv4(src, eof, dst, num)) /// try to parse as IPv4 with preparsed first octet
return clear_dst();
/// convert into IPv6
if constexpr (std::endian::native == std::endian::little)
{
dst[15] = dst[0]; dst[0] = 0;
dst[14] = dst[1]; dst[1] = 0;
dst[13] = dst[2]; dst[2] = 0;
dst[12] = dst[3]; dst[3] = 0;
}
else
{
dst[15] = dst[3]; dst[3] = 0;
dst[14] = dst[2]; dst[2] = 0;
dst[13] = dst[1]; dst[1] = 0;
dst[12] = dst[0]; dst[0] = 0;
}
dst[11] = 0xff;
dst[10] = 0xff;
return true;
}
return clear_dst();
}
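/// A minimal sketch over a plain character range: either address family is accepted and the result
/// is always IPV6_BINARY_LENGTH bytes, with plain IPv4 mapped into the ::ffff:a.b.c.d form described
/// above (the helper name is illustrative):
inline bool exampleParseIPv6orIPv4(const char * src, const char * end, unsigned char * dst)
{
    return parseIPv6orIPv4(src, [&src, end](){ return src == end; }, dst);
}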
/** Format a 4-byte binary sequence as IPv4 text: 'aaa.bbb.ccc.ddd',
* expects the input to be in BE-format, that is 0x7f000001 => "127.0.0.1".
*
@ -198,22 +417,27 @@ inline bool parseIPv6(const char * src, unsigned char * dst)
* formatIPv4(&0x7f000001, dst, mask_tail_octets = 1, "0");
* > dst == "127.0.0.0"
*/
inline void formatIPv4(const unsigned char * src, char *& dst, uint8_t mask_tail_octets = 0, const char * mask_string = "xxx")
inline void formatIPv4(const unsigned char * src, size_t src_size, char *& dst, uint8_t mask_tail_octets = 0, const char * mask_string = "xxx")
{
extern const char one_byte_to_string_lookup_table[256][4];
const size_t mask_length = mask_string ? strlen(mask_string) : 0;
const size_t limit = std::min(IPV4_BINARY_LENGTH, IPV4_BINARY_LENGTH - mask_tail_octets);
for (size_t octet = 0; octet < limit; ++octet)
const size_t padding = std::min(4 - src_size, limit);
for (size_t octet = 0; octet < padding; ++octet)
{
*dst++ = '0';
*dst++ = '.';
}
for (size_t octet = 4 - src_size; octet < limit; ++octet)
{
uint8_t value = 0;
if constexpr (std::endian::native == std::endian::little)
value = static_cast<uint8_t>(src[IPV4_BINARY_LENGTH - octet - 1]);
else
value = static_cast<uint8_t>(src[octet]);
const auto * rep = one_byte_to_string_lookup_table[value];
const uint8_t len = rep[0];
const char* str = rep + 1;
const uint8_t len = one_byte_to_string_lookup_table[value].second;
const char* str = one_byte_to_string_lookup_table[value].first;
memcpy(dst, str, len);
dst += len;
@ -231,4 +455,9 @@ inline void formatIPv4(const unsigned char * src, char *& dst, uint8_t mask_tail
dst[-1] = '\0';
}
inline void formatIPv4(const unsigned char * src, char *& dst, uint8_t mask_tail_octets = 0, const char * mask_string = "xxx")
{
formatIPv4(src, 4, dst, mask_tail_octets, mask_string);
}
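/// A minimal sketch of masked formatting; a 16-byte buffer fits "255.255.255.255" plus the
/// terminating zero (the helper name is illustrative):
inline void exampleFormatIPv4(char * buf /* at least 16 bytes */)
{
    const UInt32 ip = 0x01020304; /// 1.2.3.4 as a big-endian value
    char * pos = buf;
    formatIPv4(reinterpret_cast<const unsigned char *>(&ip), pos, /* mask_tail_octets */ 1);
    /// buf now holds "1.2.3.xxx" - the default mask string replaces the masked octet.
}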
}

View File

@ -1,6 +1,6 @@
#include <Common/tests/gtest_global_context.h>
#include <Storages/NamedCollections/NamedCollections.h>
#include <Storages/NamedCollections/NamedCollectionUtils.h>
#include <Common/NamedCollections/NamedCollections.h>
#include <Common/NamedCollections/NamedCollectionUtils.h>
#include <Poco/Util/XMLConfiguration.h>
#include <Poco/DOM/DOMParser.h>
#include <gtest/gtest.h>
@ -143,3 +143,82 @@ key2:
ASSERT_EQ(collection->get<Int64>("key2.key2_2.key2_3.key2_5"), 5);
}
TEST(NamedCollections, NestedConfigDuplicateKeys)
{
std::string xml(R"CONFIG(<clickhouse>
<named_collections>
<collection>
<headers>
<header>
<name>key1</name>
<value>value1</value>
</header>
<header>
<name>key2</name>
<value>value2</value>
</header>
<header>
<name>key3</name>
<value>value3</value>
</header>
</headers>
</collection>
</named_collections>
</clickhouse>)CONFIG");
Poco::XML::DOMParser dom_parser;
Poco::AutoPtr<Poco::XML::Document> document = dom_parser.parseString(xml);
Poco::AutoPtr<Poco::Util::XMLConfiguration> config = new Poco::Util::XMLConfiguration(document);
NamedCollectionUtils::loadFromConfig(*config);
auto collection = NamedCollectionFactory::instance().get("collection");
auto keys = collection->getKeys();
ASSERT_EQ(keys.size(), 6);
ASSERT_TRUE(keys.contains("headers.header.name"));
ASSERT_TRUE(keys.contains("headers.header[1].name"));
ASSERT_TRUE(keys.contains("headers.header[2].name"));
ASSERT_TRUE(keys.contains("headers.header.value"));
ASSERT_TRUE(keys.contains("headers.header[1].value"));
ASSERT_TRUE(keys.contains("headers.header[2].value"));
ASSERT_EQ(collection->get<String>("headers.header.name"), "key1");
ASSERT_EQ(collection->get<String>("headers.header[1].name"), "key2");
ASSERT_EQ(collection->get<String>("headers.header[2].name"), "key3");
ASSERT_EQ(collection->get<String>("headers.header.value"), "value1");
ASSERT_EQ(collection->get<String>("headers.header[1].value"), "value2");
ASSERT_EQ(collection->get<String>("headers.header[2].value"), "value3");
keys = collection->getKeys(0);
ASSERT_EQ(keys.size(), 1);
ASSERT_TRUE(keys.contains("headers"));
keys = collection->getKeys(0, "headers");
ASSERT_EQ(keys.size(), 3);
ASSERT_TRUE(keys.contains("headers.header"));
ASSERT_TRUE(keys.contains("headers.header[1]"));
ASSERT_TRUE(keys.contains("headers.header[2]"));
keys = collection->getKeys(1);
ASSERT_EQ(keys.size(), 3);
ASSERT_TRUE(keys.contains("headers.header"));
ASSERT_TRUE(keys.contains("headers.header[1]"));
ASSERT_TRUE(keys.contains("headers.header[2]"));
keys = collection->getKeys(2);
ASSERT_EQ(keys.size(), 6);
ASSERT_TRUE(keys.contains("headers.header.name"));
ASSERT_TRUE(keys.contains("headers.header[1].name"));
ASSERT_TRUE(keys.contains("headers.header[2].name"));
ASSERT_TRUE(keys.contains("headers.header.value"));
ASSERT_TRUE(keys.contains("headers.header[1].value"));
ASSERT_TRUE(keys.contains("headers.header[2].value"));
}

View File

@ -18,6 +18,9 @@ namespace DB
}
}
template<typename T, typename ... U>
concept is_any_of = (std::same_as<T, U> || ...);
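/// Usage sketch: constrains a template to a closed set of types, e.g.
///     template <typename T> requires is_any_of<T, UInt32, UInt64> void process(T value);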
/** Checks type by comparing typeid.
* The exact match of the type is checked. That is, cast to the ancestor will be unsuccessful.

View File

@ -79,7 +79,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
LOG_INFO(log, "S3 configuration was updated");
auto credentials = Aws::Auth::AWSCredentials(auth_settings.access_key_id, auth_settings.secret_access_key);
HeaderCollection headers = auth_settings.headers;
auto headers = auth_settings.headers;
static constexpr size_t s3_max_redirects = 10;
static constexpr bool enable_s3_requests_logging = false;

View File

@ -51,6 +51,18 @@ inline Field getBinaryValue(UInt8 type, ReadBuffer & buf)
readBinary(value, buf);
return value;
}
case Field::Types::IPv4:
{
IPv4 value;
readBinary(value, buf);
return value;
}
case Field::Types::IPv6:
{
IPv6 value;
readBinary(value.toUnderType(), buf);
return value;
}
case Field::Types::Int64:
{
Int64 value;
@ -583,6 +595,8 @@ String fieldTypeToString(Field::Types::Which type)
case Field::Types::Which::UInt128: return "UInt128";
case Field::Types::Which::UInt256: return "UInt256";
case Field::Types::Which::UUID: return "UUID";
case Field::Types::Which::IPv4: return "IPv4";
case Field::Types::Which::IPv6: return "IPv6";
}
}

View File

@ -13,6 +13,7 @@
#include <Core/Defines.h>
#include <Core/DecimalFunctions.h>
#include <Core/UUID.h>
#include <base/IPv4andIPv6.h>
#include <base/DayNum.h>
#include <base/strong_typedef.h>
#include <base/EnumReflection.h>
@ -192,6 +193,8 @@ template <> struct NearestFieldTypeImpl<UInt32> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<DayNum> { using Type = UInt64; };
template <> struct NearestFieldTypeImpl<UUID> { using Type = UUID; };
template <> struct NearestFieldTypeImpl<IPv4> { using Type = IPv4; };
template <> struct NearestFieldTypeImpl<IPv6> { using Type = IPv6; };
template <> struct NearestFieldTypeImpl<Int16> { using Type = Int64; };
template <> struct NearestFieldTypeImpl<Int32> { using Type = Int64; };
@ -292,6 +295,8 @@ public:
UUID = 27,
Bool = 28,
Object = 29,
IPv4 = 30,
IPv6 = 31,
};
};
@ -468,6 +473,8 @@ public:
case Types::Int128: return get<Int128>() < rhs.get<Int128>();
case Types::Int256: return get<Int256>() < rhs.get<Int256>();
case Types::UUID: return get<UUID>() < rhs.get<UUID>();
case Types::IPv4: return get<IPv4>() < rhs.get<IPv4>();
case Types::IPv6: return get<IPv6>() < rhs.get<IPv6>();
case Types::Float64: return get<Float64>() < rhs.get<Float64>();
case Types::String: return get<String>() < rhs.get<String>();
case Types::Array: return get<Array>() < rhs.get<Array>();
@ -507,6 +514,8 @@ public:
case Types::Int128: return get<Int128>() <= rhs.get<Int128>();
case Types::Int256: return get<Int256>() <= rhs.get<Int256>();
case Types::UUID: return get<UUID>().toUnderType() <= rhs.get<UUID>().toUnderType();
case Types::IPv4: return get<IPv4>() <= rhs.get<IPv4>();
case Types::IPv6: return get<IPv6>() <= rhs.get<IPv6>();
case Types::Float64: return get<Float64>() <= rhs.get<Float64>();
case Types::String: return get<String>() <= rhs.get<String>();
case Types::Array: return get<Array>() <= rhs.get<Array>();
@ -547,6 +556,8 @@ public:
return std::bit_cast<UInt64>(get<Float64>()) == std::bit_cast<UInt64>(rhs.get<Float64>());
}
case Types::UUID: return get<UUID>() == rhs.get<UUID>();
case Types::IPv4: return get<IPv4>() == rhs.get<IPv4>();
case Types::IPv6: return get<IPv6>() == rhs.get<IPv6>();
case Types::String: return get<String>() == rhs.get<String>();
case Types::Array: return get<Array>() == rhs.get<Array>();
case Types::Tuple: return get<Tuple>() == rhs.get<Tuple>();
@ -586,6 +597,8 @@ public:
case Types::Int128: return f(field.template get<Int128>());
case Types::Int256: return f(field.template get<Int256>());
case Types::UUID: return f(field.template get<UUID>());
case Types::IPv4: return f(field.template get<IPv4>());
case Types::IPv6: return f(field.template get<IPv6>());
case Types::Float64: return f(field.template get<Float64>());
case Types::String: return f(field.template get<String>());
case Types::Array: return f(field.template get<Array>());
@ -612,7 +625,7 @@ public:
private:
std::aligned_union_t<DBMS_MIN_FIELD_SIZE - sizeof(Types::Which),
Null, UInt64, UInt128, UInt256, Int64, Int128, Int256, UUID, Float64, String, Array, Tuple, Map,
Null, UInt64, UInt128, UInt256, Int64, Int128, Int256, UUID, IPv4, IPv6, Float64, String, Array, Tuple, Map,
DecimalField<Decimal32>, DecimalField<Decimal64>, DecimalField<Decimal128>, DecimalField<Decimal256>,
AggregateFunctionStateData
> storage;
@ -747,6 +760,8 @@ template <> struct Field::TypeToEnum<Int64> { static constexpr Types::Which va
template <> struct Field::TypeToEnum<Int128> { static constexpr Types::Which value = Types::Int128; };
template <> struct Field::TypeToEnum<Int256> { static constexpr Types::Which value = Types::Int256; };
template <> struct Field::TypeToEnum<UUID> { static constexpr Types::Which value = Types::UUID; };
template <> struct Field::TypeToEnum<IPv4> { static constexpr Types::Which value = Types::IPv4; };
template <> struct Field::TypeToEnum<IPv6> { static constexpr Types::Which value = Types::IPv6; };
template <> struct Field::TypeToEnum<Float64> { static constexpr Types::Which value = Types::Float64; };
template <> struct Field::TypeToEnum<String> { static constexpr Types::Which value = Types::String; };
template <> struct Field::TypeToEnum<Array> { static constexpr Types::Which value = Types::Array; };
@ -769,6 +784,8 @@ template <> struct Field::EnumToType<Field::Types::Int64> { using Type = Int64
template <> struct Field::EnumToType<Field::Types::Int128> { using Type = Int128; };
template <> struct Field::EnumToType<Field::Types::Int256> { using Type = Int256; };
template <> struct Field::EnumToType<Field::Types::UUID> { using Type = UUID; };
template <> struct Field::EnumToType<Field::Types::IPv4> { using Type = IPv4; };
template <> struct Field::EnumToType<Field::Types::IPv6> { using Type = IPv6; };
template <> struct Field::EnumToType<Field::Types::Float64> { using Type = Float64; };
template <> struct Field::EnumToType<Field::Types::String> { using Type = String; };
template <> struct Field::EnumToType<Field::Types::Array> { using Type = Array; };

View File

@ -12,6 +12,9 @@ using DataTypes = std::vector<DataTypePtr>;
struct Array;
/* Generic class for all functions.
* Represents the interface for a function signature.
*/
class IResolvedFunction
{
public:

View File

@ -14,6 +14,7 @@ namespace DB
namespace ErrorCodes
{
extern const int POSTGRESQL_CONNECTION_FAILURE;
extern const int LOGICAL_ERROR;
}
}
@ -45,7 +46,7 @@ PoolWithFailover::PoolWithFailover(
}
PoolWithFailover::PoolWithFailover(
const DB::StoragePostgreSQLConfiguration & configuration,
const DB::StoragePostgreSQL::Configuration & configuration,
size_t pool_size,
size_t pool_wait_timeout_,
size_t max_tries_,
@ -70,6 +71,9 @@ ConnectionHolderPtr PoolWithFailover::get()
{
std::lock_guard lock(mutex);
if (replicas_with_priority.empty())
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "No address specified");
DB::WriteBufferFromOwnString error_message;
for (size_t try_idx = 0; try_idx < max_tries; ++try_idx)
{

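The shape of the loop that follows the new guard is worth spelling out: replicas are grouped by priority, and each full pass over the groups counts as one try. A standalone sketch of that pattern, with illustrative names rather than the actual pool internals:

#include <map>
#include <stdexcept>
#include <string>
#include <vector>

/// Illustrative failover skeleton: walk the priority groups in order, retry
/// the whole sequence up to max_tries times, fail loudly if nothing was configured.
template <typename Replica, typename TryConnect>
Replica & getWithFailover(std::map<size_t, std::vector<Replica>> & replicas_with_priority,
                          size_t max_tries, TryConnect && try_connect)
{
    if (replicas_with_priority.empty())
        throw std::logic_error("No address specified");   /// mirrors the LOGICAL_ERROR above

    std::string error_message;
    for (size_t try_idx = 0; try_idx < max_tries; ++try_idx)
        for (auto & entry : replicas_with_priority)
            for (auto & replica : entry.second)
                if (try_connect(replica, error_message))
                    return replica;

    throw std::runtime_error("Connection attempts exhausted: " + error_message);
}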

@ -10,6 +10,7 @@
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/logger_useful.h>
#include <Storages/ExternalDataSourceConfiguration.h>
#include <Storages/StoragePostgreSQL.h>
static constexpr inline auto POSTGRESQL_POOL_DEFAULT_SIZE = 16;
@ -33,7 +34,7 @@ public:
bool auto_close_connection_);
explicit PoolWithFailover(
const DB::StoragePostgreSQLConfiguration & configuration,
const DB::StoragePostgreSQL::Configuration & configuration,
size_t pool_size,
size_t pool_wait_timeout,
size_t max_tries_,


@ -20,6 +20,7 @@
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeIPv4andIPv6.h>
#include <Columns/IColumn.h>
#include <Columns/ColumnDecimal.h>
#include <Columns/ColumnString.h>
@ -696,6 +697,8 @@ private:
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnDecimal<DateTime64>>, strategy>,
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnVector<UUID>>, strategy>,
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnVector<IPv4>>, strategy>,
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnVector<IPv6>>, strategy>,
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnString>, strategy>,
SortingQueueImpl<SpecializedSingleColumnSortCursor<ColumnFixedString>, strategy>,

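The two new queue alternatives extend the specialized single-column sort path to IPv4 and IPv6 columns. Both types order by their numeric payload (UInt32 and UInt128 respectively), so the typed cursor can compare raw values instead of going through the virtual IColumn::compareAt. A plain illustration of that ordering, using only the underlying representations:

#include <algorithm>
#include <cstdint>
#include <vector>

int main()
{
    /// IPv4 addresses as their UInt32 payloads: 10.0.0.1, 127.0.0.1, 1.1.1.1.
    std::vector<uint32_t> addrs{0x0A000001, 0x7F000001, 0x01010101};
    std::sort(addrs.begin(), addrs.end());   /// numeric order == address order
    /// addrs is now {1.1.1.1, 10.0.0.1, 127.0.0.1}
}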

@ -47,6 +47,8 @@ TYPEID_MAP(Int256)
TYPEID_MAP(Float32)
TYPEID_MAP(Float64)
TYPEID_MAP(UUID)
TYPEID_MAP(IPv4)
TYPEID_MAP(IPv6)
TYPEID_MAP(Decimal32)
TYPEID_MAP(Decimal64)


@ -8,6 +8,7 @@
#include <base/Decimal.h>
#include <base/defines.h>
#include <base/UUID.h>
#include <base/IPv4andIPv6.h>
namespace DB
@ -84,6 +85,8 @@ enum class TypeIndex
LowCardinality,
Map,
Object,
IPv4,
IPv6,
};


@ -160,6 +160,9 @@ class DataTypeDate32;
class DataTypeString;
class DataTypeFixedString;
class DataTypeUUID;
template <typename IPv> class DataTypeIP;
using DataTypeIPv4 = DataTypeIP<IPv4>;
using DataTypeIPv6 = DataTypeIP<IPv6>;
class DataTypeDateTime;
class DataTypeDateTime64;
template <typename T> class DataTypeEnum;
@ -206,6 +209,8 @@ bool callOnIndexAndDataType(TypeIndex number, F && f, ExtraArgs && ... args)
case TypeIndex::Enum16: return f(TypePair<DataTypeEnum<Int16>, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::UUID: return f(TypePair<DataTypeUUID, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::IPv4: return f(TypePair<DataTypeIPv4, T>(), std::forward<ExtraArgs>(args)...);
case TypeIndex::IPv6: return f(TypePair<DataTypeIPv6, T>(), std::forward<ExtraArgs>(args)...);
default:
break;
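With the two new cases, type-dispatching callers pick up IPv4 and IPv6 for free. A sketch of the usual calling pattern, inferred from how this helper is used elsewhere in the codebase; the include path is assumed:

#include <type_traits>
#include <Core/callOnTypeIndex.h>   /// assumed include for callOnIndexAndDataType

/// The lambda receives a TypePair whose LeftType is the matched data type;
/// the RightType slot (void here) is unused in this sketch.
bool dispatchOnType(DB::TypeIndex type_index)
{
    return DB::callOnIndexAndDataType<void>(type_index, [&](const auto & types)
    {
        using Types = std::decay_t<decltype(types)>;
        using DataType = typename Types::LeftType;   /// DataTypeIPv4 when type_index == TypeIndex::IPv4
        /// ... type-specific work for DataType would go here ...
        return true;
    });
}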


@ -1,29 +0,0 @@
#include <DataTypes/Serializations/SerializationIP.h>
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeCustom.h>
namespace DB
{
void registerDataTypeDomainIPv4AndIPv6(DataTypeFactory & factory)
{
factory.registerSimpleDataTypeCustom("IPv4", []
{
auto type = DataTypeFactory::instance().get("UInt32");
return std::make_pair(type, std::make_unique<DataTypeCustomDesc>(
std::make_unique<DataTypeCustomFixedName>("IPv4"), std::make_unique<SerializationIPv4>(type->getDefaultSerialization())));
});
factory.registerSimpleDataTypeCustom("IPv6", []
{
auto type = DataTypeFactory::instance().get("FixedString(16)");
return std::make_pair(type, std::make_unique<DataTypeCustomDesc>(
std::make_unique<DataTypeCustomFixedName>("IPv6"), std::make_unique<SerializationIPv6>(type->getDefaultSerialization())));
});
/// MySQL, MariaDB
factory.registerAlias("INET4", "IPv4", DataTypeFactory::CaseInsensitive);
factory.registerAlias("INET6", "IPv6", DataTypeFactory::CaseInsensitive);
}
}


@ -208,11 +208,11 @@ DataTypeFactory::DataTypeFactory()
registerDataTypeNullable(*this);
registerDataTypeNothing(*this);
registerDataTypeUUID(*this);
registerDataTypeIPv4andIPv6(*this);
registerDataTypeAggregateFunction(*this);
registerDataTypeNested(*this);
registerDataTypeInterval(*this);
registerDataTypeLowCardinality(*this);
registerDataTypeDomainIPv4AndIPv6(*this);
registerDataTypeDomainBool(*this);
registerDataTypeDomainSimpleAggregateFunction(*this);
registerDataTypeDomainGeo(*this);
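
This registration swap is the user-visible half of the file removal above: "IPv4" and "IPv6" no longer resolve to customized UInt32/FixedString(16) domains but to first-class data types with their own TypeIndex values. A hedged sketch of the observable difference (the INET4/INET6 aliases from the deleted file presumably move into registerDataTypeIPv4andIPv6, which is not shown in this diff):

#include <DataTypes/DataTypeFactory.h>

void checkIPv4TypeId()
{
    auto ipv4 = DB::DataTypeFactory::instance().get("IPv4");
    /// before this commit: ipv4->getTypeId() == TypeIndex::UInt32, under the custom name "IPv4"
    /// after this commit:  ipv4->getTypeId() == TypeIndex::IPv4
    (void)ipv4;
}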

Some files were not shown because too many files have changed in this diff.