diff --git a/README.md b/README.md
index 87fb4920222..eac036c2d9b 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,7 @@ curl https://clickhouse.com/ | sh
 * [**ClickHouse Meetup in Boston**](https://www.meetup.com/clickhouse-boston-user-group/events/293913596) - Jul 18
 * [**ClickHouse Meetup in NYC**](https://www.meetup.com/clickhouse-new-york-user-group/events/293913441) - Jul 19
 * [**ClickHouse Meetup in Toronto**](https://www.meetup.com/clickhouse-toronto-user-group/events/294183127) - Jul 20
+* [**ClickHouse Meetup in Singapore**](https://www.meetup.com/clickhouse-singapore-meetup-group/events/294428050/) - Jul 27

 Also, keep an eye out for upcoming meetups around the world. Somewhere else you want us to be? Please feel free to reach out to tyler <at> clickhouse <dot> com.

diff --git a/base/base/IPv4andIPv6.h b/base/base/IPv4andIPv6.h
index 7b745ec7b84..e2f93b54124 100644
--- a/base/base/IPv4andIPv6.h
+++ b/base/base/IPv4andIPv6.h
@@ -2,21 +2,23 @@

 #include <base/strong_typedef.h>
 #include <base/extended_types.h>
+#include <Common/formatIPv6.h>
 #include <Common/memcmpSmall.h>

 namespace DB
 {
-    using IPv4 = StrongTypedef<UInt32, struct IPv4Tag>;
+    struct IPv4 : StrongTypedef<UInt32, struct IPv4Tag>
+    {
+        using StrongTypedef::StrongTypedef;
+        using StrongTypedef::operator=;
+        constexpr explicit IPv4(UInt64 value): StrongTypedef(static_cast<UnderlyingType>(value)) {}
+    };

     struct IPv6 : StrongTypedef<UInt128, struct IPv6Tag>
     {
-        constexpr IPv6() = default;
-        constexpr explicit IPv6(const UInt128 & x) : StrongTypedef(x) {}
-        constexpr explicit IPv6(UInt128 && x) : StrongTypedef(std::move(x)) {}
-
-        IPv6 & operator=(const UInt128 & rhs) { StrongTypedef::operator=(rhs); return *this; }
-        IPv6 & operator=(UInt128 && rhs) { StrongTypedef::operator=(std::move(rhs)); return *this; }
+        using StrongTypedef::StrongTypedef;
+        using StrongTypedef::operator=;

         bool operator<(const IPv6 & rhs) const
         {
@@ -54,12 +56,22 @@ namespace DB

 namespace std
 {
+    /// For historical reasons we hash IPv6 as a FixedString(16)
     template <>
     struct hash<DB::IPv6>
     {
         size_t operator()(const DB::IPv6 & x) const
         {
-            return std::hash<DB::IPv6::UnderlyingType>()(x.toUnderType());
+            return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char *>(&x.toUnderType()), IPV6_BINARY_LENGTH));
         }
     };
+
+    template <>
+    struct hash<DB::IPv4>
+    {
+        size_t operator()(const DB::IPv4 & x) const
+        {
+            return std::hash<DB::IPv4::UnderlyingType>()(x.toUnderType());
+        }
+    };
 }

diff --git a/base/base/bit_cast.h b/base/base/bit_cast.h
index 5373ead36e8..4783a84586b 100644
--- a/base/base/bit_cast.h
+++ b/base/base/bit_cast.h
@@ -7,7 +7,13 @@
 /** Returns value `from` converted to type `To` while retaining bit representation.
   * `To` and `From` must satisfy `CopyConstructible`.
+  *
   * In contrast to std::bit_cast can cast types of different width.
+  *
+  * Note: for signed types of narrower size, the cast result is zero-extended
+  * instead of sign-extended as with a regular static_cast.
+  * For example, an Int8 of -1 (represented as 0xFF) bit_cast to UInt64
+  * gives 255 (represented as 0x00000000000000FF) instead of 0xFFFFFFFFFFFFFFFF.
   */
 template <typename To, typename From>
 std::decay_t<To> bit_cast(const From & from)

diff --git a/base/poco/NetSSL_OpenSSL/include/Poco/Net/ConsoleCertificateHandler.h b/base/poco/NetSSL_OpenSSL/include/Poco/Net/ConsoleCertificateHandler.h
deleted file mode 100644
index 8e09b6f18ae..00000000000
--- a/base/poco/NetSSL_OpenSSL/include/Poco/Net/ConsoleCertificateHandler.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// ConsoleCertificateHandler.h
-//
-// Library: NetSSL_OpenSSL
-// Package: SSLCore
-// Module: ConsoleCertificateHandler
-//
-// Definition of the ConsoleCertificateHandler class.
-//
-// Copyright (c) 2006-2009, Applied Informatics Software Engineering GmbH.
-// and Contributors.
-// -// SPDX-License-Identifier: BSL-1.0 -// - - -#ifndef NetSSL_ConsoleCertificateHandler_INCLUDED -#define NetSSL_ConsoleCertificateHandler_INCLUDED - - -#include "Poco/Net/InvalidCertificateHandler.h" -#include "Poco/Net/NetSSL.h" - - -namespace Poco -{ -namespace Net -{ - - - class NetSSL_API ConsoleCertificateHandler : public InvalidCertificateHandler - /// A ConsoleCertificateHandler is invoked whenever an error occurs verifying the certificate. - /// - /// The certificate is printed to stdout and the user is asked via console if he wants to accept it. - { - public: - ConsoleCertificateHandler(bool handleErrorsOnServerSide); - /// Creates the ConsoleCertificateHandler. - - virtual ~ConsoleCertificateHandler(); - /// Destroys the ConsoleCertificateHandler. - - void onInvalidCertificate(const void * pSender, VerificationErrorArgs & errorCert); - /// Prints the certificate to stdout and waits for user input on the console - /// to decide if a certificate should be accepted/rejected. - }; - - -} -} // namespace Poco::Net - - -#endif // NetSSL_ConsoleCertificateHandler_INCLUDED diff --git a/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h b/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h index a4fde26286e..21a1ed685e5 100644 --- a/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h +++ b/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h @@ -85,7 +85,7 @@ namespace Net /// /// /// - /// ConsoleCertificateHandler + /// RejectCertificateHandler /// /// true|false /// someString @@ -186,7 +186,7 @@ namespace Net /// /// Valid initialization code would be: /// SharedPtr pConsoleHandler = new KeyConsoleHandler; - /// SharedPtr pInvalidCertHandler = new ConsoleCertificateHandler; + /// SharedPtr pInvalidCertHandler = new RejectCertificateHandler; /// Context::Ptr pContext = new Context(Context::SERVER_USE, "any.pem", "any.pem", "rootcert.pem", Context::VERIFY_RELAXED, 9, false, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"); /// SSLManager::instance().initializeServer(pConsoleHandler, pInvalidCertHandler, pContext); @@ -203,7 +203,7 @@ namespace Net /// /// Valid initialization code would be: /// SharedPtr pConsoleHandler = new KeyConsoleHandler; - /// SharedPtr pInvalidCertHandler = new ConsoleCertificateHandler; + /// SharedPtr pInvalidCertHandler = new RejectCertificateHandler; /// Context::Ptr pContext = new Context(Context::CLIENT_USE, "", "", "rootcert.pem", Context::VERIFY_RELAXED, 9, false, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"); /// SSLManager::instance().initializeClient(pConsoleHandler, pInvalidCertHandler, pContext); diff --git a/base/poco/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp b/base/poco/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp index a89bbea11f2..f570e2d3599 100644 --- a/base/poco/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp +++ b/base/poco/NetSSL_OpenSSL/src/CertificateHandlerFactoryMgr.cpp @@ -13,7 +13,6 @@ #include "Poco/Net/CertificateHandlerFactoryMgr.h" -#include "Poco/Net/ConsoleCertificateHandler.h" #include "Poco/Net/AcceptCertificateHandler.h" #include "Poco/Net/RejectCertificateHandler.h" @@ -24,7 +23,6 @@ namespace Net { CertificateHandlerFactoryMgr::CertificateHandlerFactoryMgr() { - setFactory("ConsoleCertificateHandler", new CertificateHandlerFactoryImpl()); setFactory("AcceptCertificateHandler", new CertificateHandlerFactoryImpl()); setFactory("RejectCertificateHandler", new CertificateHandlerFactoryImpl()); } diff --git a/base/poco/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp 
b/base/poco/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp deleted file mode 100644 index db64752e70c..00000000000 --- a/base/poco/NetSSL_OpenSSL/src/ConsoleCertificateHandler.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// -// ConsoleCertificateHandler.cpp -// -// Library: NetSSL_OpenSSL -// Package: SSLCore -// Module: ConsoleCertificateHandler -// -// Copyright (c) 2006-2009, Applied Informatics Software Engineering GmbH. -// and Contributors. -// -// SPDX-License-Identifier: BSL-1.0 -// - - -#include "Poco/Net/ConsoleCertificateHandler.h" -#include - - -namespace Poco { -namespace Net { - - -ConsoleCertificateHandler::ConsoleCertificateHandler(bool server): InvalidCertificateHandler(server) -{ -} - - -ConsoleCertificateHandler::~ConsoleCertificateHandler() -{ -} - - -void ConsoleCertificateHandler::onInvalidCertificate(const void*, VerificationErrorArgs& errorCert) -{ - const X509Certificate& aCert = errorCert.certificate(); - std::cout << "\n"; - std::cout << "WARNING: Certificate verification failed\n"; - std::cout << "----------------------------------------\n"; - std::cout << "Issuer Name: " << aCert.issuerName() << "\n"; - std::cout << "Subject Name: " << aCert.subjectName() << "\n\n"; - std::cout << "The certificate yielded the error: " << errorCert.errorMessage() << "\n\n"; - std::cout << "The error occurred in the certificate chain at position " << errorCert.errorDepth() << "\n"; - std::cout << "Accept the certificate (y,n)? "; - char c = 0; - std::cin >> c; - if (c == 'y' || c == 'Y') - errorCert.setIgnoreError(true); - else - errorCert.setIgnoreError(false); -} - - -} } // namespace Poco::Net diff --git a/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp b/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp index 82eed1a29eb..927602ca658 100644 --- a/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp +++ b/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp @@ -46,7 +46,7 @@ const std::string SSLManager::CFG_PREFER_SERVER_CIPHERS("preferServerCiphers"); const std::string SSLManager::CFG_DELEGATE_HANDLER("privateKeyPassphraseHandler.name"); const std::string SSLManager::VAL_DELEGATE_HANDLER("KeyConsoleHandler"); const std::string SSLManager::CFG_CERTIFICATE_HANDLER("invalidCertificateHandler.name"); -const std::string SSLManager::VAL_CERTIFICATE_HANDLER("ConsoleCertificateHandler"); +const std::string SSLManager::VAL_CERTIFICATE_HANDLER("RejectCertificateHandler"); const std::string SSLManager::CFG_SERVER_PREFIX("openSSL.server."); const std::string SSLManager::CFG_CLIENT_PREFIX("openSSL.client."); const std::string SSLManager::CFG_CACHE_SESSIONS("cacheSessions"); diff --git a/contrib/arrow-cmake/CMakeLists.txt b/contrib/arrow-cmake/CMakeLists.txt index 5fe942d1cd0..46b86cb4ddb 100644 --- a/contrib/arrow-cmake/CMakeLists.txt +++ b/contrib/arrow-cmake/CMakeLists.txt @@ -31,12 +31,12 @@ endif() set (CMAKE_CXX_STANDARD 17) -set(ARROW_VERSION "6.0.1") +set(ARROW_VERSION "11.0.0") string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" ARROW_BASE_VERSION "${ARROW_VERSION}") -set(ARROW_VERSION_MAJOR "6") +set(ARROW_VERSION_MAJOR "11") set(ARROW_VERSION_MINOR "0") -set(ARROW_VERSION_PATCH "1") +set(ARROW_VERSION_PATCH "0") if(ARROW_VERSION_MAJOR STREQUAL "0") # Arrow 0.x.y => SO version is "x", full SO version is "x.y.0" @@ -514,6 +514,10 @@ if (SANITIZE STREQUAL "undefined") target_compile_options(_arrow PRIVATE -fno-sanitize=undefined) endif () +# Define Thrift version for parquet (we use 0.16.0) +add_definitions(-DPARQUET_THRIFT_VERSION_MAJOR=0) +add_definitions(-DPARQUET_THRIFT_VERSION_MINOR=16) + # === tools set(TOOLS_DIR 
"${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/tools/parquet") diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index dd21c8552d3..e824161a688 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -89,7 +89,7 @@ RUN arch=${TARGETARCH:-amd64} \ && dpkg -i /tmp/nfpm.deb \ && rm /tmp/nfpm.deb -ARG GO_VERSION=1.19.5 +ARG GO_VERSION=1.19.10 # We need go for clickhouse-diagnostics RUN arch=${TARGETARCH:-amd64} \ && curl -Lo /tmp/go.tgz "https://go.dev/dl/go${GO_VERSION}.linux-${arch}.tar.gz" \ diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index f393b98cfe6..42ae81655d2 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -1,4 +1,4 @@ -FROM ubuntu:22.04 +FROM ubuntu:20.04 # see https://github.com/moby/moby/issues/4032#issuecomment-192327844 ARG DEBIAN_FRONTEND=noninteractive @@ -11,17 +11,18 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list && apt-get update \ && apt-get upgrade -yq \ && apt-get install --yes --no-install-recommends \ - apt-transport-https \ ca-certificates \ - dirmngr \ - gnupg2 \ - wget \ locales \ tzdata \ - && apt-get clean + wget \ + && apt-get clean \ + && rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* ARG REPO_CHANNEL="stable" -ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" +ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" ARG VERSION="23.5.3.24" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" @@ -43,49 +44,68 @@ ARG single_binary_location_url="" ARG TARGETARCH -RUN arch=${TARGETARCH:-amd64} \ +# install from a web location with deb packages +RUN arch="${TARGETARCH:-amd64}" \ && if [ -n "${deb_location_url}" ]; then \ echo "installing from custom url with deb packages: ${deb_location_url}" \ - rm -rf /tmp/clickhouse_debs \ + && rm -rf /tmp/clickhouse_debs \ && mkdir -p /tmp/clickhouse_debs \ && for package in ${PACKAGES}; do \ { wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_${arch}.deb" -P /tmp/clickhouse_debs || \ wget --progress=bar:force:noscroll "${deb_location_url}/${package}_${VERSION}_all.deb" -P /tmp/clickhouse_debs ; } \ || exit 1 \ ; done \ - && dpkg -i /tmp/clickhouse_debs/*.deb ; \ - elif [ -n "${single_binary_location_url}" ]; then \ + && dpkg -i /tmp/clickhouse_debs/*.deb \ + && rm -rf /tmp/* ; \ + fi + +# install from a single binary +RUN if [ -n "${single_binary_location_url}" ]; then \ echo "installing from single binary url: ${single_binary_location_url}" \ && rm -rf /tmp/clickhouse_binary \ && mkdir -p /tmp/clickhouse_binary \ && wget --progress=bar:force:noscroll "${single_binary_location_url}" -O /tmp/clickhouse_binary/clickhouse \ && chmod +x /tmp/clickhouse_binary/clickhouse \ - && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" ; \ - else \ - mkdir -p /etc/apt/sources.list.d \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv 8919F6BD2B48D754 \ - && echo ${REPOSITORY} > /etc/apt/sources.list.d/clickhouse.list \ + && /tmp/clickhouse_binary/clickhouse install --user "clickhouse" --group "clickhouse" \ + && rm -rf /tmp/* ; \ + fi + +# A fallback to installation from ClickHouse repository +RUN if ! 
clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \ + apt-get update \ + && apt-get install --yes --no-install-recommends \ + apt-transport-https \ + ca-certificates \ + dirmngr \ + gnupg2 \ + && mkdir -p /etc/apt/sources.list.d \ + && GNUPGHOME=$(mktemp -d) \ + && GNUPGHOME="$GNUPGHOME" gpg --no-default-keyring \ + --keyring /usr/share/keyrings/clickhouse-keyring.gpg \ + --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 8919F6BD2B48D754 \ + && rm -r "$GNUPGHOME" \ + && chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \ + && echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \ && echo "installing from repository: ${REPOSITORY}" \ && apt-get update \ - && apt-get --yes -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" upgrade \ && for package in ${PACKAGES}; do \ packages="${packages} ${package}=${VERSION}" \ ; done \ && apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \ - ; fi \ - && clickhouse-local -q 'SELECT * FROM system.build_options' \ - && rm -rf \ - /var/lib/apt/lists/* \ - /var/cache/debconf \ - /tmp/* \ - && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ - && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client - -RUN apt-get autoremove --purge -yq libksba8 && \ - apt-get autoremove -yq + && rm -rf \ + /var/lib/apt/lists/* \ + /var/cache/debconf \ + /tmp/* \ + && apt-get autoremove --purge -yq libksba8 \ + && apt-get autoremove -yq \ + ; fi +# post install # we need to allow "others" access to clickhouse folder, because docker container # can be started with arbitrary uid (openshift usecase) +RUN clickhouse-local -q 'SELECT * FROM system.build_options' \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client \ + && chmod ugo+Xrw -R /var/lib/clickhouse /var/log/clickhouse-server /etc/clickhouse-server /etc/clickhouse-client RUN locale-gen en_US.UTF-8 ENV LANG en_US.UTF-8 diff --git a/docker/server/README.md b/docker/server/README.md index 18dce492123..67646a262f5 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,7 +20,6 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A). Most ARM CPUs after 2017 support ARMv8.2-A. A notable exception is Raspberry Pi 4 from 2019 whose CPU only supports ARMv8.0-A. -- Since the Clickhouse 23.3 Ubuntu image started using `ubuntu:22.04` as its base image, it requires docker version >= `20.10.10`, or use `docker run -- privileged` instead. Alternatively, try the Clickhouse Alpine image. ## How to use this image diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index 293ad9ac411..798d2a40b12 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -14,6 +14,13 @@ LEFT_SERVER_PORT=9001 # patched version RIGHT_SERVER_PORT=9002 +# abort_conf -- abort if some options is not recognized +# abort -- abort if something is not right in the env (i.e. 
per-cpu arenas does not work) +# narenas -- set them explicitly to avoid disabling per-cpu arena in env +# that returns different number of CPUs for some of the following +# _SC_NPROCESSORS_ONLN/_SC_NPROCESSORS_CONF/sched_getaffinity +export MALLOC_CONF="abort_conf:true,abort:true,narenas:$(nproc --all)" + function wait_for_server # port, pid { for _ in {1..60} @@ -109,10 +116,6 @@ function restart while pkill -f clickhouse-serv ; do echo . ; sleep 1 ; done echo all killed - # Change the jemalloc settings here. - # https://github.com/jemalloc/jemalloc/wiki/Getting-Started - export MALLOC_CONF="confirm_conf:true" - set -m # Spawn servers in their own process groups local left_server_opts=( @@ -147,8 +150,6 @@ function restart set +m - unset MALLOC_CONF - wait_for_server $LEFT_SERVER_PORT $left_pid echo left ok diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index c75a3500831..fd4cc9f4bf7 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -86,7 +86,7 @@ def process_test_log(log_path, broken_tests): test_name, "NOT_FAILED", test_time, - ["This test passed. Update broken_tests.txt.\n"], + ["This test passed. Update analyzer_tech_debt.txt.\n"], ) ) else: @@ -205,7 +205,7 @@ if __name__ == "__main__": parser.add_argument("--in-results-dir", default="/test_output/") parser.add_argument("--out-results-file", default="/test_output/test_results.tsv") parser.add_argument("--out-status-file", default="/test_output/check_status.tsv") - parser.add_argument("--broken-tests", default="/broken_tests.txt") + parser.add_argument("--broken-tests", default="/analyzer_tech_debt.txt") args = parser.parse_args() broken_tests = list() diff --git a/docs/changelogs/v23.3.6.7-lts.md b/docs/changelogs/v23.3.6.7-lts.md new file mode 100644 index 00000000000..387cc126aba --- /dev/null +++ b/docs/changelogs/v23.3.6.7-lts.md @@ -0,0 +1,19 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.3.6.7-lts (7e3f0a271b7) FIXME as compared to v23.3.5.9-lts (f5fbc2fd2b3) + +#### Improvement +* Backported in [#51240](https://github.com/ClickHouse/ClickHouse/issues/51240): Improve the progress bar for file/s3/hdfs/url table functions by using chunk size from source data and using incremental total size counting in each thread. Fix the progress bar for *Cluster functions. This closes [#47250](https://github.com/ClickHouse/ClickHouse/issues/47250). [#51088](https://github.com/ClickHouse/ClickHouse/pull/51088) ([Kruglov Pavel](https://github.com/Avogar)). + +#### Build/Testing/Packaging Improvement +* Backported in [#51529](https://github.com/ClickHouse/ClickHouse/issues/51529): Split huge `RUN` in Dockerfile into smaller conditional. Install the necessary tools on demand in the same `RUN` layer, and remove them after that. Upgrade the OS only once at the beginning. Use a modern way to check the signed repository. Downgrade the base repo to ubuntu:20.04 to address the issues on older docker versions. Upgrade golang version to address golang vulnerabilities. [#51504](https://github.com/ClickHouse/ClickHouse/pull/51504) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Fix type of LDAP server params hash in cache entry [#50865](https://github.com/ClickHouse/ClickHouse/pull/50865) ([Julian Maicher](https://github.com/jmaicher)). 
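The `MALLOC_CONF` export added to `docker/test/performance-comparison/compare.sh` above pins jemalloc's `narenas` and enables `abort_conf`/`abort`. Whether such options took effect can be sanity-checked at runtime through jemalloc's `mallctl` API; a minimal sketch, assuming the binary links against an unprefixed jemalloc build (the `opt.*` keys are standard jemalloc names):

```cpp
#include <jemalloc/jemalloc.h>
#include <cstdio>

int main()
{
    // Read back the options that MALLOC_CONF should have set.
    bool abort_conf = false;
    size_t sz = sizeof(abort_conf);
    if (mallctl("opt.abort_conf", &abort_conf, &sz, nullptr, 0) == 0)
        std::printf("opt.abort_conf = %s\n", abort_conf ? "true" : "false");

    unsigned narenas = 0;
    sz = sizeof(narenas);
    if (mallctl("opt.narenas", &narenas, &sz, nullptr, 0) == 0)
        std::printf("opt.narenas = %u\n", narenas);
    return 0;
}
```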
+ diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md index 79baf04d75d..378a1c46d93 100644 --- a/docs/en/interfaces/formats.md +++ b/docs/en/interfaces/formats.md @@ -2454,18 +2454,22 @@ In this format, all input data is read to a single value. It is possible to pars The result is output in binary format without delimiters and escaping. If more than one value is output, the format is ambiguous, and it will be impossible to read the data back. Below is a comparison of the formats `RawBLOB` and [TabSeparatedRaw](#tabseparatedraw). + `RawBLOB`: - data is output in binary format, no escaping; - there are no delimiters between values; - no newline at the end of each value. -[TabSeparatedRaw] (#tabseparatedraw): + +`TabSeparatedRaw`: - data is output without escaping; - the rows contain values separated by tabs; - there is a line feed after the last value in every row. The following is a comparison of the `RawBLOB` and [RowBinary](#rowbinary) formats. + `RawBLOB`: - String fields are output without being prefixed by length. + `RowBinary`: - String fields are represented as length in varint format (unsigned [LEB128] (https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string. diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 5643e273d39..40c1b8d64a1 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1602,7 +1602,7 @@ Keys for server/client settings: - requireTLSv1_2 (default: false) – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. - fips (default: false) – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. - privateKeyPassphraseHandler (default: `KeyConsoleHandler`)– Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler (default: `ConsoleCertificateHandler`) – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- invalidCertificateHandler (default: `RejectCertificateHandler`) – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` RejectCertificateHandler ` . - disableProtocols (default: "") – Protocols that are not allowed to use. - preferServerCiphers (default: false) – Preferred server ciphers on the client. diff --git a/docs/en/sql-reference/aggregate-functions/combinators.md b/docs/en/sql-reference/aggregate-functions/combinators.md index fd693430064..a395b350a55 100644 --- a/docs/en/sql-reference/aggregate-functions/combinators.md +++ b/docs/en/sql-reference/aggregate-functions/combinators.md @@ -97,6 +97,10 @@ Result: If you apply this combinator, the aggregate function does not return the resulting value (such as the number of unique values for the [uniq](../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) function), but an intermediate state of the aggregation (for `uniq`, this is the hash table for calculating the number of unique values). This is an `AggregateFunction(...)` that can be used for further processing or stored in a table to finish aggregating later. 
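The `docs/en/interfaces/formats.md` hunk above contrasts `RawBLOB` (raw bytes, no length prefix) with `RowBinary`, where strings carry an unsigned LEB128 length prefix. A minimal sketch of that prefix, independent of ClickHouse's actual serialization helpers:

```cpp
#include <cstdint>
#include <string>
#include <string_view>

/// Append a string the RowBinary way: unsigned LEB128 length, then the raw bytes.
/// RawBLOB, by contrast, would append only the raw bytes.
void appendRowBinaryString(std::string & out, std::string_view value)
{
    uint64_t length = value.size();
    do
    {
        uint8_t byte = length & 0x7F;
        length >>= 7;
        if (length != 0)
            byte |= 0x80; /// high bit set means "more length bytes follow"
        out.push_back(static_cast<char>(byte));
    } while (length != 0);
    out.append(value);
}
```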
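To make the intermediate-state idea above concrete, here is a toy model (not ClickHouse's implementation) of what a `-State` combinator conceptually returns for a `uniq`-like aggregate: a mergeable structure rather than a final number.

```cpp
#include <cstddef>
#include <cstdint>
#include <unordered_set>

/// Toy stand-in for an aggregation state: instead of the final result
/// (the distinct count), it keeps the mergeable intermediate data
/// (here, the set of value hashes seen so far).
struct ToyUniqState
{
    std::unordered_set<uint64_t> hashes;

    void add(uint64_t value_hash) { hashes.insert(value_hash); }

    /// Merging states is what makes "finish aggregating later" possible.
    void merge(const ToyUniqState & other) { hashes.insert(other.hashes.begin(), other.hashes.end()); }

    size_t finalize() const { return hashes.size(); }
};
```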
+:::note +Please notice, that -MapState is not an invariant for the same data due to the fact that order of data in intermediate state can change, though it doesn't impact ingestion of this data. +::: + To work with these states, use: - [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) table engine. diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 17ef494e9ad..6c56aefd51d 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -19,8 +19,19 @@ Standard aggregate functions: - [stddevSamp](/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md) - [varPop](/docs/en/sql-reference/aggregate-functions/reference/varpop.md) - [varSamp](/docs/en/sql-reference/aggregate-functions/reference/varsamp.md) +- [corr](./corr.md) - [covarPop](/docs/en/sql-reference/aggregate-functions/reference/covarpop.md) - [covarSamp](/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md) +- [entropy](./entropy.md) +- [exponentialMovingAverage](./exponentialmovingaverage.md) +- [intervalLengthSum](./intervalLengthSum.md) +- [kolmogorovSmirnovTest](./kolmogorovsmirnovtest.md) +- [mannwhitneyutest](./mannwhitneyutest.md) +- [median](./median.md) +- [rankCorr](./rankCorr.md) +- [sumKahan](./sumkahan.md) +- [studentTTest](./studentttest.md) +- [welchTTest](./welchttest.md) ClickHouse-specific aggregate functions: @@ -34,12 +45,15 @@ ClickHouse-specific aggregate functions: - [avgWeighted](/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md) - [topK](/docs/en/sql-reference/aggregate-functions/reference/topk.md) - [topKWeighted](/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md) +- [deltaSum](./deltasum.md) +- [deltaSumTimestamp](./deltasumtimestamp.md) - [groupArray](/docs/en/sql-reference/aggregate-functions/reference/grouparray.md) - [groupArrayLast](/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md) - [groupUniqArray](/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md) - [groupArrayInsertAt](/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md) - [groupArrayMovingAvg](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md) - [groupArrayMovingSum](/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md) +- [groupArraySample](./grouparraysample.md) - [groupBitAnd](/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md) - [groupBitOr](/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md) - [groupBitXor](/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md) @@ -84,3 +98,9 @@ ClickHouse-specific aggregate functions: - [theilsU](./theilsu.md) - [maxIntersections](./maxintersections.md) - [maxIntersectionsPosition](./maxintersectionsposition.md) +- [meanZTest](./meanztest.md) +- [quantileGK](./quantileGK.md) +- [quantileInterpolatedWeighted](./quantileinterpolatedweighted.md) +- [sparkBar](./sparkbar.md) +- [sumCount](./sumcount.md) + diff --git a/docs/en/sql-reference/functions/functions-for-nulls.md b/docs/en/sql-reference/functions/functions-for-nulls.md index 6f82fedaab7..d57b799e94c 100644 --- a/docs/en/sql-reference/functions/functions-for-nulls.md +++ b/docs/en/sql-reference/functions/functions-for-nulls.md @@ -8,7 +8,7 @@ sidebar_label: Nullable ## isNull -Returns whether the argument is 
[NULL](../../sql-reference/syntax.md#null-literal). +Returns whether the argument is [NULL](../../sql-reference/syntax.md#null). ``` sql isNull(x) diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 08ffae838f8..7971b3ba275 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -21,6 +21,9 @@ Expressions from `ON` clause and columns from `USING` clause are called “join ## Related Content - Blog: [ClickHouse: A Blazingly Fast DBMS with Full SQL Join Support - Part 1](https://clickhouse.com/blog/clickhouse-fully-supports-joins) +- Blog: [ClickHouse: A Blazingly Fast DBMS with Full SQL Join Support - Under the Hood - Part 2](https://clickhouse.com/blog/clickhouse-fully-supports-joins-hash-joins-part2) +- Blog: [ClickHouse: A Blazingly Fast DBMS with Full SQL Join Support - Under the Hood - Part 3](https://clickhouse.com/blog/clickhouse-fully-supports-joins-full-sort-partial-merge-part3) +- Blog: [ClickHouse: A Blazingly Fast DBMS with Full SQL Join Support - Under the Hood - Part 4](https://clickhouse.com/blog/clickhouse-fully-supports-joins-direct-join-part4) ## Supported Types of JOIN diff --git a/docs/ru/operations/server-configuration-parameters/settings.md b/docs/ru/operations/server-configuration-parameters/settings.md index 33db6df0fdd..5430469ea18 100644 --- a/docs/ru/operations/server-configuration-parameters/settings.md +++ b/docs/ru/operations/server-configuration-parameters/settings.md @@ -1067,7 +1067,7 @@ ClickHouse использует потоки из глобального пул - requireTLSv1_2 - Требование соединения TLSv1.2. Допустимые значения: `true`, `false`. - fips - Активация режима OpenSSL FIPS. Поддерживается, если версия OpenSSL, с которой собрана библиотека поддерживает fips. - privateKeyPassphraseHandler - Класс (подкласс PrivateKeyPassphraseHandler)запрашивающий кодовую фразу доступа к секретному ключу. Например, ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler - Класс (подкласс CertificateHandler) для подтверждения не валидных сертификатов. Например, ` ConsoleCertificateHandler `. +- invalidCertificateHandler - Класс (подкласс CertificateHandler) для подтверждения не валидных сертификатов. Например, ` RejectCertificateHandler `. - disableProtocols - Запрещенные к использованию протоколы. - preferServerCiphers - Предпочтение серверных шифров на клиенте. diff --git a/docs/ru/sql-reference/aggregate-functions/combinators.md b/docs/ru/sql-reference/aggregate-functions/combinators.md index 3a7ff571f99..99d5f11442c 100644 --- a/docs/ru/sql-reference/aggregate-functions/combinators.md +++ b/docs/ru/sql-reference/aggregate-functions/combinators.md @@ -66,6 +66,10 @@ WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1); В случае применения этого комбинатора, агрегатная функция возвращает не готовое значение (например, в случае функции [uniq](reference/uniq.md#agg_function-uniq) — количество уникальных значений), а промежуточное состояние агрегации (например, в случае функции `uniq` — хэш-таблицу для расчёта количества уникальных значений), которое имеет тип `AggregateFunction(...)` и может использоваться для дальнейшей обработки или может быть сохранено в таблицу для последующей доагрегации. +:::note +Промежуточное состояние для -MapState не является инвариантом для одних и тех же исходных данные т.к. порядок данных может меняться. Это не влияет, тем не менее, на загрузку таких данных. 
+::: + Для работы с промежуточными состояниями предназначены: - Движок таблиц [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md). diff --git a/docs/zh/operations/server-configuration-parameters/settings.md b/docs/zh/operations/server-configuration-parameters/settings.md index 52142eda2e8..f6106d8734e 100644 --- a/docs/zh/operations/server-configuration-parameters/settings.md +++ b/docs/zh/operations/server-configuration-parameters/settings.md @@ -466,7 +466,7 @@ SSL客户端/服务器配置。 - requireTLSv1_2 – Require a TLSv1.2 connection. Acceptable values: `true`, `false`. - fips – Activates OpenSSL FIPS mode. Supported if the library’s OpenSSL version supports FIPS. - privateKeyPassphraseHandler – Class (PrivateKeyPassphraseHandler subclass) that requests the passphrase for accessing the private key. For example: ``, `KeyFileHandler`, `test`, ``. -- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` ConsoleCertificateHandler ` . +- invalidCertificateHandler – Class (a subclass of CertificateHandler) for verifying invalid certificates. For example: ` RejectCertificateHandler ` . - disableProtocols – Protocols that are not allowed to use. - preferServerCiphers – Preferred server ciphers on the client. diff --git a/programs/server/config.xml b/programs/server/config.xml index 2f69f23a718..acd6d92a896 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -282,6 +282,13 @@ true sslv2,sslv3 true + + + + RejectCertificateHandler + diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 15f500b8bb6..bb1368b9ff8 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -25,6 +25,7 @@ IAggregateFunction * createWithNumericOrTimeType(const IDataType & argument_type WhichDataType which(argument_type); if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate(std::forward(args)...); if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate(std::forward(args)...); + if (which.idx == TypeIndex::IPv4) return new AggregateFunctionTemplate(std::forward(args)...); return createWithNumericType(argument_type, std::forward(args)...); } diff --git a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp index 16f2feb71bf..9e8060d44cc 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -39,12 +40,22 @@ public: static DataTypePtr createResultType() { return std::make_shared(std::make_shared()); } }; +template +class AggregateFunctionGroupUniqArrayIPv4 : public AggregateFunctionGroupUniqArray +{ +public: + explicit AggregateFunctionGroupUniqArrayIPv4(const DataTypePtr & argument_type, const Array & parameters_, UInt64 max_elems_ = std::numeric_limits::max()) + : AggregateFunctionGroupUniqArray(argument_type, parameters_, createResultType(), max_elems_) {} + static DataTypePtr createResultType() { return std::make_shared(std::make_shared()); } +}; + template IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, TArgs && ... 
args) { WhichDataType which(argument_type); if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate(argument_type, std::forward(args)...); else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime(argument_type, std::forward(args)...); + else if (which.idx == TypeIndex::IPv4) return new AggregateFunctionGroupUniqArrayIPv4(argument_type, std::forward(args)...); else { /// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric diff --git a/src/AggregateFunctions/AggregateFunctionMap.cpp b/src/AggregateFunctions/AggregateFunctionMap.cpp index f6100602f3f..b957b541fe1 100644 --- a/src/AggregateFunctions/AggregateFunctionMap.cpp +++ b/src/AggregateFunctions/AggregateFunctionMap.cpp @@ -100,6 +100,10 @@ public: return std::make_shared>(nested_function, arguments); case TypeIndex::UUID: return std::make_shared>(nested_function, arguments); + case TypeIndex::IPv4: + return std::make_shared>(nested_function, arguments); + case TypeIndex::IPv6: + return std::make_shared>(nested_function, arguments); case TypeIndex::FixedString: case TypeIndex::String: return std::make_shared>(nested_function, arguments); diff --git a/src/AggregateFunctions/AggregateFunctionMap.h b/src/AggregateFunctions/AggregateFunctionMap.h index 55f6611974e..7b9bb088d8f 100644 --- a/src/AggregateFunctions/AggregateFunctionMap.h +++ b/src/AggregateFunctions/AggregateFunctionMap.h @@ -19,7 +19,9 @@ #include #include #include "DataTypes/Serializations/ISerialization.h" +#include #include "base/types.h" +#include #include #include "AggregateFunctions/AggregateFunctionFactory.h" @@ -69,6 +71,31 @@ struct AggregateFunctionMapCombinatorData } }; +/// Specialization for IPv6 - for historical reasons it should be stored as FixedString(16) +template <> +struct AggregateFunctionMapCombinatorData +{ + struct IPv6Hash + { + using hash_type = std::hash; + using is_transparent = void; + + size_t operator()(const IPv6 & ip) const { return hash_type{}(ip); } + }; + + using SearchType = IPv6; + std::unordered_map> merged_maps; + + static void writeKey(const IPv6 & key, WriteBuffer & buf) + { + writeIPv6Binary(key, buf); + } + static void readKey(IPv6 & key, ReadBuffer & buf) + { + readIPv6Binary(key, buf); + } +}; + template class AggregateFunctionMap final : public IAggregateFunctionDataHelper, AggregateFunctionMap> @@ -147,6 +174,8 @@ public: StringRef key_ref; if (key_type->getTypeId() == TypeIndex::FixedString) key_ref = assert_cast(key_column).getDataAt(offset + i); + else if (key_type->getTypeId() == TypeIndex::IPv6) + key_ref = assert_cast(key_column).getDataAt(offset + i); else key_ref = assert_cast(key_column).getDataAt(offset + i); diff --git a/src/AggregateFunctions/AggregateFunctionTopK.cpp b/src/AggregateFunctions/AggregateFunctionTopK.cpp index e568694df02..8f6652223cc 100644 --- a/src/AggregateFunctions/AggregateFunctionTopK.cpp +++ b/src/AggregateFunctions/AggregateFunctionTopK.cpp @@ -5,6 +5,7 @@ #include #include #include +#include static inline constexpr UInt64 TOP_K_MAX_SIZE = 0xFFFFFF; @@ -60,6 +61,22 @@ public: {} }; +template +class AggregateFunctionTopKIPv4 : public AggregateFunctionTopK +{ +public: + using AggregateFunctionTopK::AggregateFunctionTopK; + + AggregateFunctionTopKIPv4(UInt64 threshold_, UInt64 load_factor, const DataTypes & argument_types_, const Array & params) + : AggregateFunctionTopK( + threshold_, + load_factor, + argument_types_, + params, + std::make_shared(std::make_shared())) + {} +}; + template IAggregateFunction * 
createWithExtraTypes(const DataTypes & argument_types, UInt64 threshold, UInt64 load_factor, const Array & params) @@ -72,6 +89,8 @@ IAggregateFunction * createWithExtraTypes(const DataTypes & argument_types, UInt return new AggregateFunctionTopKDate(threshold, load_factor, argument_types, params); if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTopKDateTime(threshold, load_factor, argument_types, params); + if (which.idx == TypeIndex::IPv4) + return new AggregateFunctionTopKIPv4(threshold, load_factor, argument_types, params); /// Check that we can use plain version of AggregateFunctionTopKGeneric if (argument_types[0]->isValueUnambiguouslyRepresentedInContiguousMemoryRegion()) diff --git a/src/AggregateFunctions/AggregateFunctionUniq.cpp b/src/AggregateFunctions/AggregateFunctionUniq.cpp index bf998c5ee9e..748a232641e 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.cpp +++ b/src/AggregateFunctions/AggregateFunctionUniq.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -60,6 +61,10 @@ createAggregateFunctionUniq(const std::string & name, const DataTypes & argument return std::make_shared>(argument_types); else if (which.isUUID()) return std::make_shared>(argument_types); + else if (which.isIPv4()) + return std::make_shared>(argument_types); + else if (which.isIPv6()) + return std::make_shared>(argument_types); else if (which.isTuple()) { if (use_exact_hash_function) @@ -109,6 +114,10 @@ createAggregateFunctionUniq(const std::string & name, const DataTypes & argument return std::make_shared>>(argument_types); else if (which.isUUID()) return std::make_shared>>(argument_types); + else if (which.isIPv4()) + return std::make_shared>>(argument_types); + else if (which.isIPv6()) + return std::make_shared>>(argument_types); else if (which.isTuple()) { if (use_exact_hash_function) diff --git a/src/AggregateFunctions/AggregateFunctionUniq.h b/src/AggregateFunctions/AggregateFunctionUniq.h index c782b9314fd..de68e9076a0 100644 --- a/src/AggregateFunctions/AggregateFunctionUniq.h +++ b/src/AggregateFunctions/AggregateFunctionUniq.h @@ -101,6 +101,18 @@ struct AggregateFunctionUniqHLL12Data static String getName() { return "uniqHLL12"; } }; +template <> +struct AggregateFunctionUniqHLL12Data +{ + using Set = HyperLogLogWithSmallSetOptimization; + Set set; + + constexpr static bool is_able_to_parallelize_merge = false; + constexpr static bool is_variadic = false; + + static String getName() { return "uniqHLL12"; } +}; + template struct AggregateFunctionUniqHLL12DataForVariadic { @@ -155,6 +167,25 @@ struct AggregateFunctionUniqExactData static String getName() { return "uniqExact"; } }; +/// For historical reasons IPv6 is treated as FixedString(16) +template +struct AggregateFunctionUniqExactData +{ + using Key = UInt128; + + /// When creating, the hash table must be small. 
+ using SingleLevelSet = HashSet, HashTableAllocatorWithStackMemory>; + using TwoLevelSet = TwoLevelHashSet; + using Set = UniqExactSet; + + Set set; + + constexpr static bool is_able_to_parallelize_merge = is_able_to_parallelize_merge_; + constexpr static bool is_variadic = false; + + static String getName() { return "uniqExact"; } +}; + template struct AggregateFunctionUniqExactDataForVariadic : AggregateFunctionUniqExactData { @@ -248,27 +279,22 @@ struct Adder AggregateFunctionUniqUniquesHashSetData> || std::is_same_v>) { const auto & column = *columns[0]; - if constexpr (!std::is_same_v) + if constexpr (std::is_same_v || std::is_same_v) + { + StringRef value = column.getDataAt(row_num); + data.set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size)); + } + else { using ValueType = typename decltype(data.set)::value_type; const auto & value = assert_cast &>(column).getElement(row_num); data.set.insert(static_cast(AggregateFunctionUniqTraits::hash(value))); } - else - { - StringRef value = column.getDataAt(row_num); - data.set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size)); - } } else if constexpr (std::is_same_v>) { const auto & column = *columns[0]; - if constexpr (!std::is_same_v) - { - data.set.template insert( - assert_cast &>(column).getData()[row_num]); - } - else + if constexpr (std::is_same_v || std::is_same_v) { StringRef value = column.getDataAt(row_num); @@ -279,6 +305,11 @@ struct Adder data.set.template insert(key); } + else + { + data.set.template insert( + assert_cast &>(column).getData()[row_num]); + } } #if USE_DATASKETCHES else if constexpr (std::is_same_v) diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp index 1c59da59e83..8c2cb6ea0de 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -60,6 +61,10 @@ namespace return std::make_shared::template AggregateFunction>(argument_types, params); else if (which.isUUID()) return std::make_shared::template AggregateFunction>(argument_types, params); + else if (which.isIPv4()) + return std::make_shared::template AggregateFunction>(argument_types, params); + else if (which.isIPv6()) + return std::make_shared::template AggregateFunction>(argument_types, params); else if (which.isTuple()) { if (use_exact_hash_function) diff --git a/src/AggregateFunctions/AggregateFunctionUniqCombined.h b/src/AggregateFunctions/AggregateFunctionUniqCombined.h index d879e3b3dde..5e8fa69f9de 100644 --- a/src/AggregateFunctions/AggregateFunctionUniqCombined.h +++ b/src/AggregateFunctions/AggregateFunctionUniqCombined.h @@ -119,6 +119,10 @@ struct AggregateFunctionUniqCombinedData : public Aggr { }; +template +struct AggregateFunctionUniqCombinedData : public AggregateFunctionUniqCombinedDataWithKey +{ +}; template class AggregateFunctionUniqCombined final @@ -141,16 +145,16 @@ public: void add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { - if constexpr (!std::is_same_v) - { - const auto & value = assert_cast &>(*columns[0]).getElement(row_num); - this->data(place).set.insert(detail::AggregateFunctionUniqCombinedTraits::hash(value)); - } - else + if constexpr (std::is_same_v || std::is_same_v) { StringRef value = columns[0]->getDataAt(row_num); this->data(place).set.insert(CityHash_v1_0_2::CityHash64(value.data, value.size)); } + else + { + const auto & 
value = assert_cast &>(*columns[0]).getElement(row_num); + this->data(place).set.insert(detail::AggregateFunctionUniqCombinedTraits::hash(value)); + } } void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena *) const override diff --git a/src/Analyzer/Passes/QueryAnalysisPass.cpp b/src/Analyzer/Passes/QueryAnalysisPass.cpp index c454ad9f84f..163092f1b7f 100644 --- a/src/Analyzer/Passes/QueryAnalysisPass.cpp +++ b/src/Analyzer/Passes/QueryAnalysisPass.cpp @@ -2383,7 +2383,6 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveTableIdentifierFromDatabaseCatalog(con auto storage_lock = storage->lockForShare(context->getInitialQueryId(), context->getSettingsRef().lock_acquire_timeout); auto storage_snapshot = storage->getStorageSnapshot(storage->getInMemoryMetadataPtr(), context); - auto result = std::make_shared(std::move(storage), std::move(storage_lock), std::move(storage_snapshot)); if (is_temporary_table) result->setTemporaryTableName(table_name); @@ -5210,14 +5209,26 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi const auto & second_argument_constant_literal = second_argument_constant_node->getValue(); const auto & second_argument_constant_type = second_argument_constant_node->getResultType(); - auto set = makeSetForConstantValue(first_argument_constant_type, + const auto & settings = scope.context->getSettingsRef(); + + auto result_block = getSetElementsForConstantValue(first_argument_constant_type, second_argument_constant_literal, second_argument_constant_type, - scope.context->getSettingsRef()); + settings.transform_null_in); + + SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode}; + + auto set = std::make_shared(size_limits_for_set, 0, settings.transform_null_in); + + set->setHeader(result_block.cloneEmpty().getColumnsWithTypeAndName()); + set->insertFromBlock(result_block.getColumnsWithTypeAndName()); + set->finishInsert(); + + auto future_set = std::make_shared(std::move(set)); /// Create constant set column for constant folding - auto column_set = ColumnSet::create(1, FutureSet(std::move(set))); + auto column_set = ColumnSet::create(1, std::move(future_set)); argument_columns[1].column = ColumnConst::create(std::move(column_set), 1); } diff --git a/src/Analyzer/SetUtils.cpp b/src/Analyzer/SetUtils.cpp index 0fb075e925b..15eec16e899 100644 --- a/src/Analyzer/SetUtils.cpp +++ b/src/Analyzer/SetUtils.cpp @@ -118,7 +118,7 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes & } -SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, const Settings & settings) +Block getSetElementsForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, bool transform_null_in) { DataTypes set_element_types = {expression_type}; const auto * lhs_tuple_type = typeid_cast(expression_type.get()); @@ -135,9 +135,6 @@ SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field size_t lhs_type_depth = getCompoundTypeDepth(*expression_type); size_t rhs_type_depth = getCompoundTypeDepth(*value_type); - SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode}; - bool tranform_null_in = settings.transform_null_in; - Block result_block; if (lhs_type_depth == rhs_type_depth) @@ -145,7 +142,7 @@ SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field /// 1 in 
1; (1, 2) in (1, 2); identity(tuple(tuple(tuple(1)))) in tuple(tuple(tuple(1))); etc. Array array{value}; - result_block = createBlockFromCollection(array, set_element_types, tranform_null_in); + result_block = createBlockFromCollection(array, set_element_types, transform_null_in); } else if (lhs_type_depth + 1 == rhs_type_depth) { @@ -154,9 +151,9 @@ SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field WhichDataType rhs_which_type(value_type); if (rhs_which_type.isArray()) - result_block = createBlockFromCollection(value.get(), set_element_types, tranform_null_in); + result_block = createBlockFromCollection(value.get(), set_element_types, transform_null_in); else if (rhs_which_type.isTuple()) - result_block = createBlockFromCollection(value.get(), set_element_types, tranform_null_in); + result_block = createBlockFromCollection(value.get(), set_element_types, transform_null_in); else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Unsupported type at the right-side of IN. Expected Array or Tuple. Actual {}", @@ -170,13 +167,7 @@ SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field value_type->getName()); } - auto set = std::make_shared(size_limits_for_set, true /*fill_set_elements*/, tranform_null_in); - - set->setHeader(result_block.cloneEmpty().getColumnsWithTypeAndName()); - set->insertFromBlock(result_block.getColumnsWithTypeAndName()); - set->finishInsert(); - - return set; + return result_block; } } diff --git a/src/Analyzer/SetUtils.h b/src/Analyzer/SetUtils.h index 7afc8e5259c..c35b45dce59 100644 --- a/src/Analyzer/SetUtils.h +++ b/src/Analyzer/SetUtils.h @@ -12,19 +12,12 @@ namespace DB class Set; using SetPtr = std::shared_ptr; -/** Make set for constant part of IN subquery. +/** Get set elements for constant part of IN subquery. * Throws exception if parameters are not valid for IN function. * * Example: SELECT id FROM test_table WHERE id IN (1, 2, 3, 4); * Example: SELECT id FROM test_table WHERE id IN ((1, 2), (3, 4)); - * - * @param expression_type - type of first argument of function IN. - * @param value - constant value of second argument of function IN. - * @param value_type - type of second argument of function IN. - * @param settings - query settings. - * - * @return SetPtr for constant value. */ -SetPtr makeSetForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, const Settings & settings); +Block getSetElementsForConstantValue(const DataTypePtr & expression_type, const Field & value, const DataTypePtr & value_type, bool transform_null_in); } diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 479db03c883..34b3b1e228a 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -102,6 +102,7 @@ namespace ErrorCodes extern const int UNRECOGNIZED_ARGUMENTS; extern const int LOGICAL_ERROR; extern const int CANNOT_OPEN_FILE; + extern const int FILE_ALREADY_EXISTS; } } @@ -567,30 +568,17 @@ try CompressionMethod compression_method = chooseCompressionMethod(out_file, compression_method_string); UInt64 compression_level = 3; - if (query_with_output->is_outfile_append && compression_method != CompressionMethod::None) - { - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Cannot append to compressed file. 
Please use uncompressed file or remove APPEND keyword."); - } - if (query_with_output->compression_level) { const auto & compression_level_node = query_with_output->compression_level->as(); - bool res = compression_level_node.value.tryGet(compression_level); - auto range = getCompressionLevelRange(compression_method); - - if (!res || compression_level < range.first || compression_level > range.second) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "Invalid compression level, must be positive integer in range {}-{}", - range.first, - range.second); + compression_level_node.value.tryGet(compression_level); } auto flags = O_WRONLY | O_EXCL; if (query_with_output->is_outfile_append) flags |= O_APPEND; + else if (query_with_output->is_outfile_truncate) + flags |= O_TRUNC; else flags |= O_CREAT; @@ -871,6 +859,67 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa } } + // Run some local checks to make sure queries into output file will work before sending to server. + if (const auto * query_with_output = dynamic_cast(parsed_query.get())) + { + String out_file; + if (query_with_output->out_file) + { + const auto & out_file_node = query_with_output->out_file->as(); + out_file = out_file_node.value.safeGet(); + + std::string compression_method_string; + + if (query_with_output->compression) + { + const auto & compression_method_node = query_with_output->compression->as(); + compression_method_string = compression_method_node.value.safeGet(); + } + + CompressionMethod compression_method = chooseCompressionMethod(out_file, compression_method_string); + UInt64 compression_level = 3; + + if (query_with_output->is_outfile_append && query_with_output->is_outfile_truncate) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot use INTO OUTFILE with APPEND and TRUNCATE simultaneously."); + } + + if (query_with_output->is_outfile_append && compression_method != CompressionMethod::None) + { + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Cannot append to compressed file. Please use uncompressed file or remove APPEND keyword."); + } + + if (query_with_output->compression_level) + { + const auto & compression_level_node = query_with_output->compression_level->as(); + bool res = compression_level_node.value.tryGet(compression_level); + auto range = getCompressionLevelRange(compression_method); + + if (!res || compression_level < range.first || compression_level > range.second) + throw Exception( + ErrorCodes::BAD_ARGUMENTS, + "Invalid compression level, must be positive integer in range {}-{}", + range.first, + range.second); + } + + if (fs::exists(out_file)) + { + if (!query_with_output->is_outfile_append && !query_with_output->is_outfile_truncate) + { + throw Exception( + ErrorCodes::FILE_ALREADY_EXISTS, + "File {} exists, consider using APPEND or TRUNCATE.", + out_file); + } + } + } + } + const auto & settings = global_context->getSettingsRef(); const Int32 signals_before_stop = settings.partial_result_on_first_cancel ? 
2 : 1; diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index be226bfd9dd..cd102f46ffe 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -1175,16 +1175,12 @@ ProfileInfo Connection::receiveProfileInfo() const ParallelReadRequest Connection::receiveParallelReadRequest() const { - ParallelReadRequest request; - request.deserialize(*in); - return request; + return ParallelReadRequest::deserialize(*in); } InitialAllRangesAnnouncement Connection::receiveInitialParallelReadAnnounecement() const { - InitialAllRangesAnnouncement announcement; - announcement.deserialize(*in); - return announcement; + return InitialAllRangesAnnouncement::deserialize(*in); } diff --git a/src/Client/IServerConnection.h b/src/Client/IServerConnection.h index 52382ff9d45..a0c029c79fb 100644 --- a/src/Client/IServerConnection.h +++ b/src/Client/IServerConnection.h @@ -16,6 +16,10 @@ #include +#include +#include +#include +#include namespace DB { @@ -34,9 +38,9 @@ struct Packet ProfileInfo profile_info; std::vector part_uuids; - InitialAllRangesAnnouncement announcement; - ParallelReadRequest request; - ParallelReadResponse response; + /// The part of parallel replicas protocol + std::optional announcement; + std::optional request; std::string server_timezone; diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 3d07080bb28..6e989e10f76 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -43,7 +43,7 @@ Suggest::Suggest() "IN", "KILL", "QUERY", "SYNC", "ASYNC", "TEST", "BETWEEN", "TRUNCATE", "USER", "ROLE", "PROFILE", "QUOTA", "POLICY", "ROW", "GRANT", "REVOKE", "OPTION", "ADMIN", "EXCEPT", "REPLACE", "IDENTIFIED", "HOST", "NAME", "READONLY", "WRITABLE", "PERMISSIVE", "FOR", "RESTRICTIVE", "RANDOMIZED", - "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "CLEANUP" + "INTERVAL", "LIMITS", "ONLY", "TRACKING", "IP", "REGEXP", "ILIKE", "CLEANUP", "APPEND" }); } diff --git a/src/Columns/ColumnSet.h b/src/Columns/ColumnSet.h index 3f5cf4ad280..935a72e551a 100644 --- a/src/Columns/ColumnSet.h +++ b/src/Columns/ColumnSet.h @@ -21,7 +21,7 @@ class ColumnSet final : public COWHelper private: friend class COWHelper; - ColumnSet(size_t s_, FutureSet data_) : data(std::move(data_)) { s = s_; } + ColumnSet(size_t s_, FutureSetPtr data_) : data(std::move(data_)) { s = s_; } ColumnSet(const ColumnSet &) = default; public: @@ -29,13 +29,13 @@ public: TypeIndex getDataType() const override { return TypeIndex::Set; } MutableColumnPtr cloneDummy(size_t s_) const override { return ColumnSet::create(s_, data); } - ConstSetPtr getData() const { if (!data.isReady()) return nullptr; return data.get(); } + FutureSetPtr getData() const { return data; } // Used only for debugging, making it DUMPABLE Field operator[](size_t) const override { return {}; } private: - FutureSet data; + FutureSetPtr data; }; } diff --git a/src/Common/StringSearcher.h b/src/Common/StringSearcher.h index ae440f9151b..3ed192d05f3 100644 --- a/src/Common/StringSearcher.h +++ b/src/Common/StringSearcher.h @@ -21,12 +21,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int BAD_ARGUMENTS; -} - - /** Variants for searching a substring in a string. * In most cases, performance is less than Volnitsky (see Volnitsky.h). 
*/ @@ -811,15 +805,22 @@ class TokenSearcher : public StringSearcherBase size_t needle_size; public: + + template + requires (sizeof(CharT) == 1) + static bool isValidNeedle(const CharT * needle_, size_t needle_size_) + { + return std::none_of(needle_, needle_ + needle_size_, isTokenSeparator); + } + template requires (sizeof(CharT) == 1) TokenSearcher(const CharT * needle_, size_t needle_size_) : searcher(needle_, needle_size_) , needle_size(needle_size_) { - if (std::any_of(needle_, needle_ + needle_size_, isTokenSeparator)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle must not contain whitespace or separator characters"); - + /// The caller is responsible for calling isValidNeedle() + chassert(isValidNeedle(needle_, needle_size_)); } template @@ -880,6 +881,7 @@ using ASCIICaseSensitiveStringSearcher = impl::StringSearcher; using ASCIICaseInsensitiveStringSearcher = impl::StringSearcher; using UTF8CaseSensitiveStringSearcher = impl::StringSearcher; using UTF8CaseInsensitiveStringSearcher = impl::StringSearcher; + using ASCIICaseSensitiveTokenSearcher = impl::TokenSearcher; using ASCIICaseInsensitiveTokenSearcher = impl::TokenSearcher; diff --git a/src/Common/Volnitsky.h b/src/Common/Volnitsky.h index 64c5bf4c0d3..8f9aa23a38a 100644 --- a/src/Common/Volnitsky.h +++ b/src/Common/Volnitsky.h @@ -406,8 +406,7 @@ public: /// And also adding from the end guarantees that we will find first occurrence because we will lookup bigger offsets first. for (auto i = static_cast(needle_size - sizeof(VolnitskyTraits::Ngram)); i >= 0; --i) { - bool ok = VolnitskyTraits::putNGram( - needle + i, static_cast(i + 1), needle, needle_size, callback); + bool ok = VolnitskyTraits::putNGram(needle + i, static_cast(i + 1), needle, needle_size, callback); /** `putNGramUTF8CaseInsensitive` does not work if characters with lower and upper cases * are represented by different number of bytes or code points. diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp index 4a2866d152d..859a9c2463a 100644 --- a/src/Compression/CompressionCodecDeflateQpl.cpp +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -9,6 +9,7 @@ #include #include "libaccel_config.h" #include +#include namespace DB { @@ -34,6 +35,7 @@ DeflateQplJobHWPool::DeflateQplJobHWPool() // loop all configured workqueue size to get maximum job number. accfg_ctx * ctx_ptr = nullptr; auto ctx_status = accfg_new(&ctx_ptr); + SCOPE_EXIT({ accfg_unref(ctx_ptr); }); if (ctx_status == 0) { auto * dev_ptr = accfg_device_get_first(ctx_ptr); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 3d42bd582ed..658f3c8025b 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -141,7 +141,7 @@ class IColumn; M(Bool, allow_suspicious_low_cardinality_types, false, "In CREATE TABLE statement allows specifying LowCardinality modifier for types of small fixed size (8 or less). Enabling this may increase merge times and memory consumption.", 0) \ M(Bool, allow_suspicious_fixed_string_types, false, "In CREATE TABLE statement allows creating columns of type FixedString(n) with n > 256. 
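The TokenSearcher change above turns a throwing constructor check into a predicate the caller must run first. A runnable approximation of isValidNeedle; the real isTokenSeparator treats ASCII non-alphanumerics as separators and lets non-ASCII bytes through, so std::isalnum here is only a stand-in:

#include <algorithm>
#include <cassert>
#include <cctype>
#include <string_view>

static bool isTokenSeparatorSketch(unsigned char c)
{
    return c < 0x80 && !std::isalnum(c);  // ASCII non-alphanumerics separate tokens
}

static bool isValidNeedleSketch(std::string_view needle)
{
    return std::none_of(needle.begin(), needle.end(),
                        [](char c) { return isTokenSeparatorSketch(static_cast<unsigned char>(c)); });
}

int main()
{
    assert(isValidNeedleSketch("abc123"));
    assert(!isValidNeedleSketch("abc 123"));  // whitespace makes the needle invalid
}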
FixedString with length >= 256 is suspicious and most likely indicates misusage", 0) \ M(Bool, allow_suspicious_indices, false, "Reject primary/secondary indexes and sorting keys with identical expressions", 0) \ - M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ + M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code.", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code. This feature has a bug and should not be used.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ diff --git a/src/Core/Types_fwd.h b/src/Core/Types_fwd.h index 3db1127fe5a..a59e4b6eab8 100644 --- a/src/Core/Types_fwd.h +++ b/src/Core/Types_fwd.h @@ -27,7 +27,7 @@ namespace DB using UUID = StrongTypedef; -using IPv4 = StrongTypedef; +struct IPv4; struct IPv6; diff --git a/src/DataTypes/DataTypeMap.cpp b/src/DataTypes/DataTypeMap.cpp index 0f5d97e6761..90561857fad 100644 --- a/src/DataTypes/DataTypeMap.cpp +++ b/src/DataTypes/DataTypeMap.cpp @@ -69,7 +69,7 @@ void DataTypeMap::assertKeyType() const if (!checkKeyType(key_type)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Type of Map key must be a type, that can be represented by integer " - "or String or FixedString (possibly LowCardinality) or UUID," + "or String or FixedString (possibly LowCardinality) or UUID or IPv6," " but {} given", key_type->getName()); } @@ -120,6 +120,7 @@ bool DataTypeMap::checkKeyType(DataTypePtr key_type) else if (!key_type->isValueRepresentedByInteger() && !isStringOrFixedString(*key_type) && !WhichDataType(key_type).isNothing() + && !WhichDataType(key_type).isIPv6() && !WhichDataType(key_type).isUUID()) { return false; diff --git a/src/DataTypes/DataTypeSet.h b/src/DataTypes/DataTypeSet.h index d88d76b31be..e71a345a195 100644 --- a/src/DataTypes/DataTypeSet.h +++ b/src/DataTypes/DataTypeSet.h @@ -22,7 +22,7 @@ public: bool isParametric() const override { return true; } // Used for expressions analysis. - MutableColumnPtr createColumn() const override { return ColumnSet::create(0, FutureSet{}); } + MutableColumnPtr createColumn() const override { return ColumnSet::create(0, nullptr); } // Used only for debugging, making it DUMPABLE Field getDefault() const override { return Tuple(); } diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 59cf8f0174c..202f40bfdb2 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -875,10 +875,11 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() LOG_TEST( log, - "Current read type: {}, read offset: {}, impl offset: {}, file segment: {}", + "Current read type: {}, read offset: {}, impl offset: {}, impl position: {}, file segment: {}", toString(read_type), file_offset_of_buffer_end, implementation_buffer->getFileOffsetOfBufferEnd(), + implementation_buffer->getPosition(), file_segment.getInfoForLog()); chassert(current_read_range.left <= file_offset_of_buffer_end); @@ -937,7 +938,8 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() // We don't support implementation_buffer implementations that use nextimpl_working_buffer_offset. 
chassert(implementation_buffer->position() == implementation_buffer->buffer().begin()); - size = implementation_buffer->buffer().size(); + if (result) + size = implementation_buffer->buffer().size(); LOG_TEST( log, @@ -951,15 +953,21 @@ bool CachedOnDiskReadBufferFromFile::nextImplStep() ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheBytes, size); ProfileEvents::increment(ProfileEvents::CachedReadBufferReadFromCacheMicroseconds, elapsed); - const size_t new_file_offset = file_offset_of_buffer_end + size; - const size_t file_segment_write_offset = file_segment.getCurrentWriteOffset(true); - if (new_file_offset > file_segment.range().right + 1 || new_file_offset > file_segment_write_offset) + if (result) { - auto file_segment_path = file_segment.getPathInLocalCache(); - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Read unexpected size. File size: {}, file path: {}, file segment info: {}", - fs::file_size(file_segment_path), file_segment_path, file_segment.getInfoForLog()); + const size_t new_file_offset = file_offset_of_buffer_end + size; + const size_t file_segment_write_offset = file_segment.getCurrentWriteOffset(true); + if (new_file_offset > file_segment.range().right + 1 || new_file_offset > file_segment_write_offset) + { + auto file_segment_path = file_segment.getPathInLocalCache(); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Read unexpected size. " + "File size: {}, file segment path: {}, impl size: {}, impl path: {}" + "file segment info: {}", + fs::file_size(file_segment_path), file_segment_path, + implementation_buffer->getFileSize(), implementation_buffer->getFileName(), + file_segment.getInfoForLog()); + } } } else diff --git a/src/Disks/ObjectStorages/ObjectStorageIterator.h b/src/Disks/ObjectStorages/ObjectStorageIterator.h index 2ff5ce60acc..841b0ea6664 100644 --- a/src/Disks/ObjectStorages/ObjectStorageIterator.h +++ b/src/Disks/ObjectStorages/ObjectStorageIterator.h @@ -14,6 +14,7 @@ public: virtual bool isValid() = 0; virtual RelativePathWithMetadata current() = 0; virtual RelativePathsWithMetadata currentBatch() = 0; + virtual std::optional getCurrrentBatchAndScheduleNext() = 0; virtual size_t getAccumulatedSize() const = 0; virtual ~IObjectStorageIterator() = default; @@ -53,6 +54,11 @@ public: return batch; } + virtual std::optional getCurrrentBatchAndScheduleNext() override + { + return std::nullopt; + } + size_t getAccumulatedSize() const override { return batch.size(); diff --git a/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp b/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp index f91c19f2fb9..7425f629a5a 100644 --- a/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp +++ b/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.cpp @@ -100,6 +100,22 @@ RelativePathsWithMetadata IObjectStorageIteratorAsync::currentBatch() return current_batch; } +std::optional IObjectStorageIteratorAsync::getCurrrentBatchAndScheduleNext() +{ + std::lock_guard lock(mutex); + if (!is_initialized) + nextBatch(); + + if (current_batch_iterator != current_batch.end()) + { + auto temp_current_batch = current_batch; + nextBatch(); + return temp_current_batch; + } + + return std::nullopt; +} + size_t IObjectStorageIteratorAsync::getAccumulatedSize() const { return accumulated_size.load(std::memory_order_relaxed); diff --git a/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.h b/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.h index a2b06da9a91..b0dd3cef39c 100644 --- a/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.h +++ 
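A toy model of the getCurrrentBatchAndScheduleNext() contract introduced above: the caller drains whole batches while the implementation is free to schedule the next listing in the background, and std::nullopt signals exhaustion. Everything below is illustrative, not the real interface:

#include <cstdio>
#include <optional>
#include <vector>

struct ToyBatchIterator
{
    std::vector<std::vector<int>> batches{{1, 2}, {3}};
    size_t pos = 0;

    // Stand-in for the method above: return the current batch and advance;
    // the real implementation would kick off the next async listing here.
    std::optional<std::vector<int>> getCurrentBatchAndScheduleNext()
    {
        if (pos == batches.size())
            return std::nullopt;
        return batches[pos++];
    }
};

int main()
{
    ToyBatchIterator it;
    while (auto batch = it.getCurrentBatchAndScheduleNext())
        for (int x : *batch)
            std::printf("%d\n", x);
}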
b/src/Disks/ObjectStorages/ObjectStorageIteratorAsync.h @@ -27,6 +27,7 @@ public: RelativePathWithMetadata current() override; RelativePathsWithMetadata currentBatch() override; size_t getAccumulatedSize() const override; + std::optional getCurrrentBatchAndScheduleNext() override; ~IObjectStorageIteratorAsync() override { @@ -48,7 +49,7 @@ protected: bool is_initialized{false}; bool is_finished{false}; - mutable std::mutex mutex; + mutable std::recursive_mutex mutex; ThreadPool list_objects_pool; ThreadPoolCallbackRunner list_objects_scheduler; std::future outcome_future; diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 84c71c89b11..019e0c42cde 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -322,7 +322,6 @@ struct ToTimeImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToDateImpl; }; @@ -394,7 +393,6 @@ struct ToStartOfSecondImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -442,7 +440,6 @@ struct ToStartOfMillisecondImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -486,7 +483,6 @@ struct ToStartOfMicrosecondImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -524,7 +520,6 @@ struct ToStartOfNanosecondImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -723,28 +718,6 @@ struct ToYearImpl return time_zone.toYear(DayNum(d)); } - static inline constexpr bool hasPreimage() { return true; } - - static inline RangeOrNull getPreimage(const IDataType & type, const Field & point) - { - if (point.getType() != Field::Types::UInt64) return std::nullopt; - - auto year = point.get(); - if (year < DATE_LUT_MIN_YEAR || year >= DATE_LUT_MAX_YEAR) return std::nullopt; - - const DateLUTImpl & date_lut = DateLUT::instance(); - - auto start_time = date_lut.makeDateTime(year, 1, 1, 0, 0, 0); - auto end_time = date_lut.addYears(start_time, 1); - - if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(start_time), Field(end_time))}; - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of argument of function {}. 
Should be Date, Date32, DateTime or DateTime64", - type.getName(), name); - } - using FactorTransform = ZeroTransform; }; @@ -818,7 +791,6 @@ struct ToQuarterImpl { return time_zone.toQuarter(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -843,7 +815,6 @@ struct ToMonthImpl { return time_zone.toMonth(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -869,7 +840,6 @@ struct ToDayOfMonthImpl return time_zone.toDayOfMonth(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfMonthImpl; }; @@ -917,7 +887,6 @@ struct ToDayOfYearImpl { return time_zone.toDayOfYear(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfYearImpl; }; @@ -942,7 +911,6 @@ struct ToHourImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToDateImpl; }; @@ -971,7 +939,6 @@ struct TimezoneOffsetImpl throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToTimeImpl; }; @@ -995,7 +962,6 @@ struct ToMinuteImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfHourImpl; }; @@ -1020,7 +986,6 @@ struct ToSecondImpl { throwDateTimeIsNotSupported(name); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToStartOfMinuteImpl; }; @@ -1045,7 +1010,6 @@ struct ToISOYearImpl { return time_zone.toISOYear(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1102,7 +1066,6 @@ struct ToISOWeekImpl { return time_zone.toISOWeek(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ToISOYearImpl; }; @@ -1145,7 +1108,6 @@ struct ToRelativeYearNumImpl { return time_zone.toYear(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1177,7 +1139,6 @@ struct ToRelativeQuarterNumImpl { return time_zone.toRelativeQuarterNum(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1209,7 +1170,6 @@ struct ToRelativeMonthNumImpl { return time_zone.toRelativeMonthNum(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1241,7 +1201,6 @@ struct ToRelativeWeekNumImpl { return time_zone.toRelativeWeekNum(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1273,7 +1232,6 @@ struct ToRelativeDayNumImpl { return static_cast(d); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1311,7 +1269,6 @@ struct ToRelativeHourNumImpl else return static_cast(time_zone.toRelativeHourNum(DayNum(d))); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1343,7 +1300,6 @@ struct ToRelativeMinuteNumImpl { return static_cast(time_zone.toRelativeMinuteNum(DayNum(d))); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1372,7 +1328,6 @@ struct ToRelativeSecondNumImpl { return 
static_cast(time_zone.fromDayNum(DayNum(d))); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1397,31 +1352,6 @@ struct ToYYYYMMImpl { return time_zone.toNumYYYYMM(DayNum(d)); } - static inline constexpr bool hasPreimage() { return true; } - - static inline RangeOrNull getPreimage(const IDataType & type, const Field & point) - { - if (point.getType() != Field::Types::UInt64) return std::nullopt; - - auto year_month = point.get(); - auto year = year_month / 100; - auto month = year_month % 100; - - if (year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || (year == DATE_LUT_MAX_YEAR && month == 12)) - return std::nullopt; - - const DateLUTImpl & date_lut = DateLUT::instance(); - - auto start_time = date_lut.makeDateTime(year, month, 1, 0, 0, 0); - auto end_time = date_lut.addMonths(start_time, 1); - - if (isDateOrDate32(type) || isDateTime(type) || isDateTime64(type)) - return {std::make_pair(Field(start_time), Field(end_time))}; - else - throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Illegal type {} of argument of function {}. Should be Date, Date32, DateTime or DateTime64", - type.getName(), name); - } using FactorTransform = ZeroTransform; }; @@ -1446,7 +1376,6 @@ struct ToYYYYMMDDImpl { return time_zone.toNumYYYYMMDD(DayNum(d)); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; @@ -1471,7 +1400,6 @@ struct ToYYYYMMDDhhmmssImpl { return time_zone.toNumYYYYMMDDhhmmss(time_zone.toDate(DayNum(d))); } - static inline constexpr bool hasPreimage() { return false; } using FactorTransform = ZeroTransform; }; diff --git a/src/Functions/FunctionDateOrDateTimeToSomething.h b/src/Functions/FunctionDateOrDateTimeToSomething.h index d98b788c7d7..82818cc3d2b 100644 --- a/src/Functions/FunctionDateOrDateTimeToSomething.h +++ b/src/Functions/FunctionDateOrDateTimeToSomething.h @@ -7,7 +7,6 @@ namespace DB namespace ErrorCodes { extern const int ILLEGAL_TYPE_OF_ARGUMENT; - extern const int NOT_IMPLEMENTED; } /// See DateTimeTransforms.h @@ -84,18 +83,6 @@ public: arguments[0].type->getName(), this->getName()); } - bool hasInformationAboutPreimage() const override { return Transform::hasPreimage(); } - - RangeOrNull getPreimage(const IDataType & type, const Field & point) const override - { - if constexpr (Transform::hasPreimage()) - return Transform::getPreimage(type, point); - else - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "Function {} has no information about its preimage", - Transform::name); - } - }; } diff --git a/src/Functions/FunctionsCodingIP.h b/src/Functions/FunctionsCodingIP.h index d02cc81f608..9d090abb736 100644 --- a/src/Functions/FunctionsCodingIP.h +++ b/src/Functions/FunctionsCodingIP.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -16,6 +17,7 @@ namespace ErrorCodes extern const int CANNOT_PARSE_IPV4; extern const int CANNOT_PARSE_IPV6; extern const int ILLEGAL_COLUMN; + extern const int CANNOT_CONVERT_TYPE; } enum class IPStringToNumExceptionMode : uint8_t @@ -296,4 +298,87 @@ ColumnPtr convertToIPv4(ColumnPtr column, const PaddedPODArray * null_map return col_res; } +template +ColumnPtr convertIPv6ToIPv4(ColumnPtr column, const PaddedPODArray * null_map = nullptr) +{ + const ColumnIPv6 * column_ipv6 = checkAndGetColumn(column.get()); + + if (!column_ipv6) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column type {}. 
Expected IPv6.", column->getName()); + + size_t column_size = column_ipv6->size(); + + ColumnUInt8::MutablePtr col_null_map_to; + ColumnUInt8::Container * vec_null_map_to = nullptr; + + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + { + col_null_map_to = ColumnUInt8::create(column_size, false); + vec_null_map_to = &col_null_map_to->getData(); + } + + const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; + + auto col_res = ToColumn::create(); + auto & vec_res = col_res->getData(); + vec_res.resize(column_size); + const auto & vec_src = column_ipv6->getData(); + + for (size_t i = 0; i < vec_res.size(); ++i) + { + const uint8_t * src = reinterpret_cast(&vec_src[i]); + uint8_t * dst = reinterpret_cast(&vec_res[i]); + + if (null_map && (*null_map)[i]) + { + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + (*vec_null_map_to)[i] = true; + continue; + } + + if (!matchIPv6Subnet(src, ip4_cidr, 96)) + { + if constexpr (exception_mode == IPStringToNumExceptionMode::Throw) + { + char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; + char * paddr = addr; + formatIPv6(src, paddr); + + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, column->getName()); + } + else if constexpr (exception_mode == IPStringToNumExceptionMode::Default) + { + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + } + else if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + { + (*vec_null_map_to)[i] = true; + std::memset(dst, '\0', IPV4_BINARY_LENGTH); + } + continue; + } + + if constexpr (std::endian::native == std::endian::little) + { + dst[0] = src[15]; + dst[1] = src[14]; + dst[2] = src[13]; + dst[3] = src[12]; + } + else + { + dst[0] = src[12]; + dst[1] = src[13]; + dst[2] = src[14]; + dst[3] = src[15]; + } + } + + if constexpr (exception_mode == IPStringToNumExceptionMode::Null) + return ColumnNullable::create(std::move(col_res), std::move(col_null_map_to)); + + return col_res; +} + } diff --git a/src/Functions/FunctionsConversion.h b/src/Functions/FunctionsConversion.h index 023f1ab5425..00c4cfe7284 100644 --- a/src/Functions/FunctionsConversion.h +++ b/src/Functions/FunctionsConversion.h @@ -57,6 +57,7 @@ #include #include #include +#include #include @@ -210,13 +211,13 @@ struct ConvertImpl } else if constexpr ( (std::is_same_v != std::is_same_v) - && !(is_any_of || is_any_of) + && !(is_any_of || is_any_of) ) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion from {} to {} is not supported", TypeName, TypeName); } - else if constexpr (std::is_same_v != std::is_same_v) + else if constexpr (std::is_same_v != std::is_same_v && !(std::is_same_v || std::is_same_v)) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Conversion between numeric types and IPv6 is not supported. 
" @@ -297,7 +298,58 @@ struct ConvertImpl } else { - if constexpr (std::is_same_v && std::is_same_v) + if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t ip4_cidr[] {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00}; + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + if (!matchIPv6Subnet(src, ip4_cidr, 96)) + { + char addr[IPV6_MAX_TEXT_LENGTH + 1] {}; + char * paddr = addr; + formatIPv6(src, paddr); + + throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "IPv6 {} in column {} is not in IPv4 mapping block", addr, named_from.column->getName()); + } + + uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); + if constexpr (std::endian::native == std::endian::little) + { + dst[0] = src[15]; + dst[1] = src[14]; + dst[2] = src[13]; + dst[3] = src[12]; + } + else + { + dst[0] = src[12]; + dst[1] = src[13]; + dst[2] = src[14]; + dst[3] = src[15]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) + { + const uint8_t * src = reinterpret_cast(&vec_from[i].toUnderType()); + uint8_t * dst = reinterpret_cast(&vec_to[i].toUnderType()); + std::memset(dst, '\0', IPV6_BINARY_LENGTH); + dst[10] = dst[11] = 0xff; + + if constexpr (std::endian::native == std::endian::little) + { + dst[12] = src[3]; + dst[13] = src[2]; + dst[14] = src[1]; + dst[15] = src[0]; + } + else + { + dst[12] = src[0]; + dst[13] = src[1]; + dst[14] = src[2]; + dst[15] = src[3]; + } + } + else if constexpr (std::is_same_v && std::is_same_v) vec_to[i] = static_cast(static_cast(vec_from[i])); else if constexpr (std::is_same_v && (std::is_same_v || std::is_same_v)) vec_to[i] = static_cast(vec_from[i] * DATE_SECONDS_PER_DAY); @@ -4010,6 +4062,26 @@ private: return true; } } + else if constexpr (WhichDataType(FromDataType::type_id).isIPv6() && WhichDataType(ToDataType::type_id).isIPv4()) + { + ret = [cast_ipv4_ipv6_default_on_conversion_error_value, requested_result_is_nullable]( + ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * column_nullable, size_t) + -> ColumnPtr + { + if (!WhichDataType(result_type).isIPv4()) + throw Exception( + ErrorCodes::TYPE_MISMATCH, "Wrong result type {}. Expected IPv4", result_type->getName()); + + const auto * null_map = column_nullable ? 
&column_nullable->getNullMapData() : nullptr; + if (cast_ipv4_ipv6_default_on_conversion_error_value || requested_result_is_nullable) + return convertIPv6ToIPv4(arguments[0].column, null_map); + else + return convertIPv6ToIPv4(arguments[0].column, null_map); + }; + + return true; + } + if constexpr (WhichDataType(ToDataType::type_id).isStringOrFixedString()) { if (from_type->getCustomSerialization()) diff --git a/src/Functions/FunctionsHashing.h b/src/Functions/FunctionsHashing.h index a4d4fbd085d..db0ff976d63 100644 --- a/src/Functions/FunctionsHashing.h +++ b/src/Functions/FunctionsHashing.h @@ -402,34 +402,6 @@ struct SipHash128ReferenceImpl static constexpr bool use_int_hash_for_pods = false; }; -struct SipHash128ReferenceKeyedImpl -{ - static constexpr auto name = "sipHash128ReferenceKeyed"; - using ReturnType = UInt128; - using Key = impl::SipHashKey; - - static Key parseKey(const ColumnWithTypeAndName & key) { return impl::parseSipHashKey(key); } - - static UInt128 applyKeyed(const Key & key, const char * begin, size_t size) - { - return sipHash128ReferenceKeyed(key.key0, key.key1, begin, size); - } - - static UInt128 combineHashesKeyed(const Key & key, UInt128 h1, UInt128 h2) - { -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - UInt128 tmp; - reverseMemcpy(&tmp, &h1, sizeof(UInt128)); - h1 = tmp; - reverseMemcpy(&tmp, &h2, sizeof(UInt128)); - h2 = tmp; -#endif - UInt128 hashes[] = {h1, h2}; - return applyKeyed(key, reinterpret_cast(hashes), 2 * sizeof(UInt128)); - } - - static constexpr bool use_int_hash_for_pods = false; -}; /** Why we need MurmurHash2? * MurmurHash2 is an outdated hash function, superseded by MurmurHash3 and subsequently by CityHash, xxHash, HighwayHash. @@ -1737,7 +1709,6 @@ using FunctionSHA512 = FunctionStringHashFixedString; using FunctionSipHash128 = FunctionAnyHash; using FunctionSipHash128Keyed = FunctionAnyHash; using FunctionSipHash128Reference = FunctionAnyHash; -using FunctionSipHash128ReferenceKeyed = FunctionAnyHash; using FunctionCityHash64 = FunctionAnyHash; using FunctionFarmFingerprint64 = FunctionAnyHash; using FunctionFarmHash64 = FunctionAnyHash; diff --git a/src/Functions/FunctionsHashingMisc.cpp b/src/Functions/FunctionsHashingMisc.cpp index 127c87ba940..56c3c1ed00c 100644 --- a/src/Functions/FunctionsHashingMisc.cpp +++ b/src/Functions/FunctionsHashingMisc.cpp @@ -20,11 +20,6 @@ REGISTER_FUNCTION(Hashing) .examples{{"hash", "SELECT hex(sipHash128Reference('foo', '\\x01', 3))", ""}}, .categories{"Hash"} }); - factory.registerFunction(FunctionDocumentation{ - .description="Same as [sipHash128Reference](#hash_functions-siphash128reference) but additionally takes an explicit key argument instead of using a fixed key.", - .examples{{"hash", "SELECT hex(sipHash128ReferenceKeyed((506097522914230528, 1084818905618843912),'foo', '\\x01', 3));", ""}}, - .categories{"Hash"} - }); factory.registerFunction(); factory.registerFunction(); factory.registerFunction(); diff --git a/src/Functions/HasTokenImpl.h b/src/Functions/HasTokenImpl.h index 18e629b58fb..8cacdfff99d 100644 --- a/src/Functions/HasTokenImpl.h +++ b/src/Functions/HasTokenImpl.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include @@ -9,6 +10,7 @@ namespace DB namespace ErrorCodes { + extern const int BAD_ARGUMENTS; extern const int ILLEGAL_COLUMN; extern const int ILLEGAL_TYPE_OF_ARGUMENT; } @@ -44,51 +46,47 @@ struct HasTokenImpl const UInt8 * const end = haystack_data.data() + haystack_data.size(); const UInt8 * pos = begin; - try + if 
(!ASCIICaseSensitiveTokenSearcher::isValidNeedle(pattern.data(), pattern.size())) { - /// Parameter `pattern` is supposed to be a literal of letters and/or numbers. - /// Otherwise, an exception from the constructor of `TokenSearcher` is thrown. - /// If no exception is thrown at that point, then no further error cases may occur. - TokenSearcher searcher(pattern.data(), pattern.size(), end - pos); if (res_null) - std::ranges::fill(res_null->getData(), false); - - /// The current index in the array of strings. - size_t i = 0; - /// We will search for the next occurrence in all rows at once. - while (pos < end && end != (pos = searcher.search(pos, end - pos))) - { - /// Let's determine which index it refers to. - while (begin + haystack_offsets[i] <= pos) - { - res[i] = negate; - ++i; - } - - /// We check that the entry does not pass through the boundaries of strings. - if (pos + pattern.size() < begin + haystack_offsets[i]) - res[i] = !negate; - else - res[i] = negate; - - pos = begin + haystack_offsets[i]; - ++i; - } - - /// Tail, in which there can be no substring. - if (i < res.size()) - memset(&res[i], negate, (res.size() - i) * sizeof(res[0])); - } - catch (...) - { - if (!res_null) - throw; - else { std::ranges::fill(res, 0); std::ranges::fill(res_null->getData(), true); + return; } + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Needle must not contain whitespace or separator characters"); } + + TokenSearcher searcher(pattern.data(), pattern.size(), end - pos); + if (res_null) + std::ranges::fill(res_null->getData(), false); + + /// The current index in the array of strings. + size_t i = 0; + /// We will search for the next occurrence in all rows at once. + while (pos < end && end != (pos = searcher.search(pos, end - pos))) + { + /// Let's determine which index it refers to. + while (begin + haystack_offsets[i] <= pos) + { + res[i] = negate; + ++i; + } + + /// We check that the entry does not pass through the boundaries of strings. + if (pos + pattern.size() < begin + haystack_offsets[i]) + res[i] = !negate; + else + res[i] = negate; + + pos = begin + haystack_offsets[i]; + ++i; + } + + /// Tail, in which there can be no substring. + if (i < res.size()) + memset(&res[i], negate, (res.size() - i) * sizeof(res[0])); } template diff --git a/src/Functions/IFunction.h b/src/Functions/IFunction.h index 433cb61d04e..c5b9a78015d 100644 --- a/src/Functions/IFunction.h +++ b/src/Functions/IFunction.h @@ -2,8 +2,6 @@ #include #include -#include -#include #include #include #include @@ -13,6 +11,11 @@ #include +#if USE_EMBEDDED_COMPILER +# include +#endif + + /// This file contains user interface for functions. namespace llvm @@ -32,8 +35,7 @@ namespace ErrorCodes extern const int ILLEGAL_TYPE_OF_ARGUMENT; } -/// A left-closed and right-open interval representing the preimage of a function. -using RangeOrNull = std::optional>; +class Field; /// The simplest executable object. /// Motivation: @@ -231,12 +233,6 @@ public: */ virtual bool hasInformationAboutMonotonicity() const { return false; } - /** Lets you know if the function has its definition of preimage. - * This is used to work with predicate optimizations, where the comparison between - * f(x) and a constant c could be converted to the comparison between x and f's preimage [b, e). - */ - virtual bool hasInformationAboutPreimage() const { return false; } - struct ShortCircuitSettings { /// Should we enable lazy execution for the first argument of short-circuit function? 
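The search loop above runs once over the whole flattened column and attributes each hit to a row via the offsets array (offsets[i] points one past row i's terminating zero byte). A runnable miniature of the same bookkeeping, with plain substring search standing in for the token searcher and no negate handling:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

static std::vector<bool> hasSubstringPerRow(const std::string & flat,
                                            const std::vector<size_t> & offsets,
                                            const std::string & needle)
{
    std::vector<bool> res(offsets.size(), false);
    size_t i = 0;
    size_t pos = 0;
    while (i < offsets.size() && (pos = flat.find(needle, pos)) != std::string::npos)
    {
        while (offsets[i] <= pos)              // advance to the row containing pos
            ++i;
        if (pos + needle.size() < offsets[i])  // match must not cross the row boundary
            res[i] = true;
        pos = offsets[i];                      // resume from the start of the next row
        ++i;
    }
    return res;
}

int main()
{
    std::string flat("x tok y\0tok\0", 12);    // rows "x tok y" and "tok"
    auto res = hasSubstringPerRow(flat, {8, 12}, "tok");
    assert(res[0] && res[1]);
}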
@@ -290,14 +286,6 @@ public: throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its monotonicity", getName()); } - /** Get the preimage of a function in the form of a left-closed and right-open interval. Call only if hasInformationAboutPreimage. - * std::nullopt might be returned if the point (a single value) is invalid for this function. - */ - virtual RangeOrNull getPreimage(const IDataType & /*type*/, const Field & /*point*/) const - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its preimage", getName()); - } - }; using FunctionBasePtr = std::shared_ptr; @@ -487,17 +475,12 @@ public: virtual bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const = 0; virtual bool hasInformationAboutMonotonicity() const { return false; } - virtual bool hasInformationAboutPreimage() const { return false; } using Monotonicity = IFunctionBase::Monotonicity; virtual Monotonicity getMonotonicityForRange(const IDataType & /*type*/, const Field & /*left*/, const Field & /*right*/) const { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its monotonicity", getName()); } - virtual RangeOrNull getPreimage(const IDataType & /*type*/, const Field & /*point*/) const - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} has no information about its preimage", getName()); - } /// For non-variadic functions, return number of arguments; otherwise return zero (that should be ignored). virtual size_t getNumberOfArguments() const = 0; diff --git a/src/Functions/IFunctionAdaptors.h b/src/Functions/IFunctionAdaptors.h index 123fdbc2f50..23725b1a8b1 100644 --- a/src/Functions/IFunctionAdaptors.h +++ b/src/Functions/IFunctionAdaptors.h @@ -90,17 +90,10 @@ public: bool hasInformationAboutMonotonicity() const override { return function->hasInformationAboutMonotonicity(); } - bool hasInformationAboutPreimage() const override { return function->hasInformationAboutPreimage(); } - Monotonicity getMonotonicityForRange(const IDataType & type, const Field & left, const Field & right) const override { return function->getMonotonicityForRange(type, left, right); } - - RangeOrNull getPreimage(const IDataType & type, const Field & point) const override - { - return function->getPreimage(type, point); - } private: std::shared_ptr function; DataTypes arguments; diff --git a/src/Functions/LowerUpperUTF8Impl.h b/src/Functions/LowerUpperUTF8Impl.h index e8d0abae322..f6b18439fd1 100644 --- a/src/Functions/LowerUpperUTF8Impl.h +++ b/src/Functions/LowerUpperUTF8Impl.h @@ -139,7 +139,7 @@ struct LowerUpperUTF8Impl /// In case partial buffer was passed (due to SSE optimization) /// we cannot convert it with current src_end, but we may have more /// bytes to convert and eventually got correct symbol. - if (partial && src_sequence_length > static_cast(src_end-src)) + if (partial && src_sequence_length > static_cast(src_end - src)) return false; auto src_code_point = UTF8::convertUTF8ToCodePoint(src, src_end - src); @@ -181,7 +181,9 @@ private: #ifdef __SSE2__ static constexpr auto bytes_sse = sizeof(__m128i); - const auto * src_end_sse = src + (src_end - src) / bytes_sse * bytes_sse; + + /// If we are before this position, we can still read at least bytes_sse. 
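To see why that comment gives the right bound: a 16-byte load at offset p needs p + 16 <= len, so the last safe offset is len - 16 and the exclusive bound is len - 16 + 1. The old round-down bound (len / 16) * 16 was only safe while src advanced in whole 16-byte steps; after a scalar UTF-8 resync it could admit an out-of-bounds load. A small check of the arithmetic:

#include <cassert>
#include <cstddef>

int main()
{
    const size_t len = 20;
    const size_t bytes_sse = 16;

    const size_t old_bound = (len / bytes_sse) * bytes_sse;  // 16
    const size_t new_bound = len - bytes_sse + 1;            // 5

    assert(15 < old_bound);        // offset 15 wrongly admitted: a load reads bytes 15..30
    assert(!(15 < new_bound));     // offset 15 correctly rejected
    assert(4 + bytes_sse <= len);  // offset 4 is the last safe load position
}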
+ const auto * src_end_sse = src_end - bytes_sse + 1; /// SSE2 packed comparison operate on signed types, hence compare (c < 0) instead of (c > 0x7f) const auto v_zero = _mm_setzero_si128(); @@ -227,9 +229,11 @@ private: { /// UTF-8 + /// Find the offset of the next string after src size_t offset_from_begin = src - begin; while (offset_from_begin >= *offset_it) ++offset_it; + /// Do not allow one row influence another (since row may have invalid sequence, and break the next) const UInt8 * row_end = begin + *offset_it; chassert(row_end >= src); @@ -247,8 +251,9 @@ private: } } - /// Find which offset src has now - while (offset_it != offsets.end() && static_cast(src - begin) >= *offset_it) + /// Find the offset of the next string after src + size_t offset_from_begin = src - begin; + while (offset_it != offsets.end() && offset_from_begin >= *offset_it) ++offset_it; #endif diff --git a/src/Functions/addressToLineWithInlines.cpp b/src/Functions/addressToLineWithInlines.cpp index 9a7b0858b87..43e2eeb10c7 100644 --- a/src/Functions/addressToLineWithInlines.cpp +++ b/src/Functions/addressToLineWithInlines.cpp @@ -59,14 +59,13 @@ protected: void setResult(StringRefs & result, const Dwarf::LocationInfo & location, const std::vector & inline_frames) const override { - appendLocationToResult(result, location, nullptr); for (const auto & inline_frame : inline_frames) appendLocationToResult(result, inline_frame.location, &inline_frame); } -private: - inline ALWAYS_INLINE void appendLocationToResult(StringRefs & result, const Dwarf::LocationInfo & location, const Dwarf::SymbolizedFrame * frame) const +private: + void appendLocationToResult(StringRefs & result, const Dwarf::LocationInfo & location, const Dwarf::SymbolizedFrame * frame) const { const char * arena_begin = nullptr; WriteBufferFromArena out(cache.arena, arena_begin); @@ -83,6 +82,7 @@ private: } result.emplace_back(out.complete()); + out.finalize(); } }; diff --git a/src/Functions/hasToken.cpp b/src/Functions/hasToken.cpp index de67e80e752..b90750ea233 100644 --- a/src/Functions/hasToken.cpp +++ b/src/Functions/hasToken.cpp @@ -16,18 +16,18 @@ struct NameHasTokenOrNull static constexpr auto name = "hasTokenOrNull"; }; -using FunctionHasToken = DB::FunctionsStringSearch>; -using FunctionHasTokenOrNull = DB:: - FunctionsStringSearch, DB::ExecutionErrorPolicy::Null>; +using FunctionHasToken + = FunctionsStringSearch>; +using FunctionHasTokenOrNull + = FunctionsStringSearch, ExecutionErrorPolicy::Null>; REGISTER_FUNCTION(HasToken) { factory.registerFunction(FunctionDocumentation - {.description="Performs lookup of needle in haystack using tokenbf_v1 index."}, DB::FunctionFactory::CaseSensitive); + {.description="Performs lookup of needle in haystack using tokenbf_v1 index."}, FunctionFactory::CaseSensitive); factory.registerFunction(FunctionDocumentation - {.description="Performs lookup of needle in haystack using tokenbf_v1 index. Returns null if needle is ill-formed."}, - DB::FunctionFactory::CaseSensitive); + {.description="Performs lookup of needle in haystack using tokenbf_v1 index. 
Returns null if needle is ill-formed."}, FunctionFactory::CaseSensitive); } } diff --git a/src/Functions/hasTokenCaseInsensitive.cpp b/src/Functions/hasTokenCaseInsensitive.cpp index a6e8ecf3e9d..d7381e336b5 100644 --- a/src/Functions/hasTokenCaseInsensitive.cpp +++ b/src/Functions/hasTokenCaseInsensitive.cpp @@ -17,10 +17,9 @@ struct NameHasTokenCaseInsensitiveOrNull }; using FunctionHasTokenCaseInsensitive - = DB::FunctionsStringSearch>; -using FunctionHasTokenCaseInsensitiveOrNull = DB::FunctionsStringSearch< - DB::HasTokenImpl, - DB::ExecutionErrorPolicy::Null>; + = FunctionsStringSearch>; +using FunctionHasTokenCaseInsensitiveOrNull + = FunctionsStringSearch, ExecutionErrorPolicy::Null>; REGISTER_FUNCTION(HasTokenCaseInsensitive) { diff --git a/src/Functions/in.cpp b/src/Functions/in.cpp index 7a41ae2e3ea..9045ba677f2 100644 --- a/src/Functions/in.cpp +++ b/src/Functions/in.cpp @@ -122,9 +122,13 @@ public: tuple = typeid_cast(materialized_tuple.get()); } - auto set = column_set->getData(); + auto future_set = column_set->getData(); + if (!future_set) + throw Exception(ErrorCodes::LOGICAL_ERROR, "No Set is passed as the second argument for function '{}'", getName()); + + auto set = future_set->get(); if (!set) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Not-ready Set passed as the second argument for function '{}'", getName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Not-ready Set is passed as the second argument for function '{}'", getName()); auto set_types = set->getDataTypes(); diff --git a/src/Functions/transform.cpp b/src/Functions/transform.cpp index 9970012ef4a..8d6e53c491e 100644 --- a/src/Functions/transform.cpp +++ b/src/Functions/transform.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -13,9 +14,10 @@ #include #include #include -#include #include #include +#include + namespace DB { @@ -25,32 +27,31 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH; extern const int ILLEGAL_COLUMN; + extern const int LOGICAL_ERROR; } namespace { - /** transform(x, from_array, to_array[, default]) - convert x according to an explicitly passed match. - */ /** transform(x, [from...], [to...], default) - * - converts the values according to the explicitly specified mapping. - * - * x - what to transform. - * from - a constant array of values for the transformation. - * to - a constant array of values into which values from `from` must be transformed. - * default - what value to use if x is not equal to any of the values in `from`. - * `from` and `to` - arrays of the same size. - * - * Types: - * transform(T, Array(T), Array(U), U) -> U - * - * transform(x, [from...], [to...]) - * - if `default` is not specified, then for values of `x` for which there is no corresponding element in `from`, the unchanged value of `x` is returned. - * - * Types: - * transform(T, Array(T), Array(T)) -> T - * - * Note: the implementation is rather cumbersome. - */ + * - converts the values according to the explicitly specified mapping. + * + * x - what to transform. + * from - a constant array of values for the transformation. + * to - a constant array of values into which values from `from` must be transformed. + * default - what value to use if x is not equal to any of the values in `from`. + * `from` and `to` - arrays of the same size. 
+ * + * Types: + * transform(T, Array(T), Array(U), U) -> U + * + * transform(x, [from...], [to...]) + * - if `default` is not specified, then for values of `x` for which there is no corresponding element in `from`, the unchanged value of `x` is returned. + * + * Types: + * transform(T, Array(T), Array(T)) -> T + * + * Note: the implementation is rather cumbersome. + */ class FunctionTransform : public IFunction { public: @@ -79,15 +80,6 @@ namespace args_size); const DataTypePtr & type_x = arguments[0]; - const auto & type_x_nn = removeNullable(type_x); - - if (!type_x_nn->isValueRepresentedByNumber() && !isString(type_x_nn) && !isNothing(type_x_nn)) - throw Exception( - ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, - "Unsupported type {} of first argument " - "of function {}, must be numeric type or Date/DateTime or String", - type_x->getName(), - getName()); const DataTypeArray * type_arr_from = checkAndGetDataType(arguments[1].get()); @@ -99,14 +91,16 @@ namespace const auto type_arr_from_nested = type_arr_from->getNestedType(); - if ((type_x->isValueRepresentedByNumber() != type_arr_from_nested->isValueRepresentedByNumber()) - || (isString(type_x) != isString(type_arr_from_nested))) + auto src = tryGetLeastSupertype(DataTypes{type_x, type_arr_from_nested}); + if (!src + /// Compatibility with previous versions, that allowed even UInt64 with Int64, + /// regardless of ambiguous conversions. + && !isNativeNumber(type_x) && !isNativeNumber(type_arr_from_nested)) { throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument and elements of array " - "of second argument of function {} must have compatible types: " - "both numeric or both strings.", + "of the second argument of function {} must have compatible types", getName()); } @@ -157,8 +151,8 @@ namespace } } - ColumnPtr - executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + ColumnPtr executeImpl( + const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override { initialize(arguments, result_type); @@ -172,22 +166,42 @@ namespace default_non_const = castColumn(arguments[3], result_type); auto column_result = result_type->createColumn(); - if (!executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeNum>(in, *column_result, default_non_const) - && !executeString(in, *column_result, default_non_const)) + if (cache.is_empty) { - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", in->getName(), getName()); + return default_non_const + ? 
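The contract documented in the comment above, in miniature: look x up among `from`, return the matching element of `to`, otherwise the default. A hash index from value to position mirrors the function's cache tables; the helper below is illustrative, not the real implementation:

#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

template <typename T, typename U>
std::vector<U> transformSketch(const std::vector<T> & x,
                               const std::vector<T> & from,
                               const std::vector<U> & to,
                               const U & def)
{
    std::unordered_map<T, size_t> index;  // from-value -> position, like the cache
    for (size_t i = 0; i < from.size(); ++i)
        index.emplace(from[i], i);

    std::vector<U> res;
    res.reserve(x.size());
    for (const auto & v : x)
    {
        auto it = index.find(v);
        res.push_back(it == index.end() ? def : to[it->second]);
    }
    return res;
}

int main()
{
    auto r = transformSketch<int, std::string>({1, 2, 3}, {1, 2}, {"one", "two"}, "?");
    std::printf("%s %s %s\n", r[0].c_str(), r[1].c_str(), r[2].c_str());  // one two ?
}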
default_non_const + : castColumn(arguments[0], result_type); } + else if (cache.table_num_to_idx) + { + if (!executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const) + && !executeNum>(in, *column_result, default_non_const)) + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Illegal column {} of first argument of function {}", in->getName(), getName()); + } + } + else if (cache.table_string_to_idx) + { + if (!executeString(in, *column_result, default_non_const)) + executeContiguous(in, *column_result, default_non_const); + } + else if (cache.table_anything_to_idx) + { + executeAnything(in, *column_result, default_non_const); + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "State of the function `transform` is not initialized"); + return column_result; } @@ -204,6 +218,47 @@ namespace return impl->execute(args, result_type, input_rows_count); } + void executeAnything(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const) const + { + const size_t size = in->size(); + const auto & table = *cache.table_anything_to_idx; + column_result.reserve(size); + for (size_t i = 0; i < size; ++i) + { + SipHash hash; + in->updateHashWithValue(i, hash); + + const auto * it = table.find(hash.get128()); + if (it) + column_result.insertFrom(*cache.to_column, it->getMapped()); + else if (cache.default_column) + column_result.insertFrom(*cache.default_column, 0); + else if (default_non_const) + column_result.insertFrom(*default_non_const, i); + else + column_result.insertFrom(*in, i); + } + } + + void executeContiguous(const IColumn * in, IColumn & column_result, const ColumnPtr default_non_const) const + { + const size_t size = in->size(); + const auto & table = *cache.table_string_to_idx; + column_result.reserve(size); + for (size_t i = 0; i < size; ++i) + { + const auto * it = table.find(in->getDataAt(i)); + if (it) + column_result.insertFrom(*cache.to_column, it->getMapped()); + else if (cache.default_column) + column_result.insertFrom(*cache.default_column, 0); + else if (default_non_const) + column_result.insertFrom(*default_non_const, i); + else + column_result.insertFrom(*in, i); + } + } + template bool executeNum(const IColumn * in_untyped, IColumn & column_result, const ColumnPtr default_non_const) const { @@ -236,7 +291,7 @@ namespace { const auto * it = table.find(bit_cast(pod[i])); if (it) - column_result.insertFrom(*cache.to_columns, it->getMapped()); + column_result.insertFrom(*cache.to_column, it->getMapped()); else if (cache.default_column) column_result.insertFrom(*cache.default_column, 0); else if (default_non_const) @@ -259,14 +314,14 @@ namespace out_offs.resize(size); auto & out_chars = out->getChars(); - const auto * to_col = reinterpret_cast(cache.to_columns.get()); + const auto * to_col = assert_cast(cache.to_column.get()); const auto & to_chars = to_col->getChars(); const auto & to_offs = to_col->getOffsets(); const auto & table = *cache.table_num_to_idx; if (cache.default_column) { - const auto * def 
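executeAnything() above makes transform() work for types without a fixed-width or contiguous representation by keying the table on a 128-bit SipHash of each value. A sketch of the shape, with std::hash over a serialized string standing in for IColumn::updateHashWithValue plus SipHash:

#include <cassert>
#include <cstdint>
#include <string>
#include <unordered_map>

struct AnythingToIdxSketch
{
    std::unordered_map<uint64_t, size_t> table;  // hash of value -> index in `from`

    void add(const std::string & serialized, size_t idx)
    {
        table.emplace(std::hash<std::string>{}(serialized), idx);
    }

    // Returns the matching index, or size_t(-1); like the real hash-keyed
    // table, this trusts the hash and does not re-compare the value.
    size_t find(const std::string & serialized) const
    {
        auto it = table.find(std::hash<std::string>{}(serialized));
        return it == table.end() ? size_t(-1) : it->second;
    }
};

int main()
{
    AnythingToIdxSketch t;
    t.add("(1,'a')", 0);
    assert(t.find("(1,'a')") == 0 && t.find("(2,'b')") == size_t(-1));
}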
= reinterpret_cast(cache.default_column.get()); + const auto * def = assert_cast(cache.default_column.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); const auto * def_data = def_chars.data(); @@ -275,7 +330,7 @@ namespace } else { - const auto * def = reinterpret_cast(default_non_const.get()); + const auto * def = assert_cast(default_non_const.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); executeNumToStringHelper(table, pod, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, size); @@ -340,16 +395,16 @@ namespace if constexpr (std::is_same_v, T> || std::is_same_v, T>) out_scale = out->getScale(); - const auto & to_pod = reinterpret_cast(cache.to_columns.get())->getData(); + const auto & to_pod = assert_cast(cache.to_column.get())->getData(); const auto & table = *cache.table_num_to_idx; if (cache.default_column) { - const auto const_def = reinterpret_cast(cache.default_column.get())->getData()[0]; + const auto const_def = assert_cast(cache.default_column.get())->getData()[0]; executeNumToNumHelper(table, pod, out_pod, to_pod, const_def, size, out_scale, out_scale); } else if (default_non_const) { - const auto & nconst_def = reinterpret_cast(default_non_const.get())->getData(); + const auto & nconst_def = assert_cast(default_non_const.get())->getData(); executeNumToNumHelper(table, pod, out_pod, to_pod, nconst_def, size, out_scale, out_scale); } else @@ -423,11 +478,11 @@ namespace ColumnString::Offset current_offset = 0; for (size_t i = 0; i < size; ++i) { - const StringRef ref{&data[current_offset], offsets[i] - current_offset}; + const StringRef ref{&data[current_offset], offsets[i] - current_offset - 1}; current_offset = offsets[i]; const auto * it = table.find(ref); if (it) - column_result.insertFrom(*cache.to_columns, it->getMapped()); + column_result.insertFrom(*cache.to_column, it->getMapped()); else if (cache.default_column) column_result.insertFrom(*cache.default_column, 0); else if (default_non_const) @@ -453,14 +508,14 @@ namespace out_offs.resize(size); auto & out_chars = out->getChars(); - const auto * to_col = reinterpret_cast(cache.to_columns.get()); + const auto * to_col = assert_cast(cache.to_column.get()); const auto & to_chars = to_col->getChars(); const auto & to_offs = to_col->getOffsets(); const auto & table = *cache.table_string_to_idx; if (cache.default_column) { - const auto * def = reinterpret_cast(cache.default_column.get()); + const auto * def = assert_cast(cache.default_column.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); const auto * def_data = def_chars.data(); @@ -469,7 +524,7 @@ namespace } else if (default_non_const) { - const auto * def = reinterpret_cast(default_non_const.get()); + const auto * def = assert_cast(default_non_const.get()); const auto & def_chars = def->getChars(); const auto & def_offs = def->getOffsets(); executeStringToStringHelper(table, data, offsets, out_chars, out_offs, to_chars, to_offs, def_chars, def_offs, size); @@ -500,7 +555,7 @@ namespace { const char8_t * to = nullptr; size_t to_size = 0; - const StringRef ref{&data[current_offset], offsets[i] - current_offset}; + const StringRef ref{&data[current_offset], offsets[i] - current_offset - 1}; current_offset = offsets[i]; const auto * it = table.find(ref); if (it) @@ -542,16 +597,16 @@ namespace const size_t size = offsets.size(); out_pod.resize(size); - const auto & to_pod = reinterpret_cast(cache.to_columns.get())->getData(); + 
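The repeated "- 1" fixes in these hunks are worth spelling out: ColumnString stores every row with a terminating zero byte, and offsets[i] points one past that byte, so a row's real length is offsets[i] - prev - 1. Without the subtraction the lookup key drags the trailing zero along and never matches the table. A runnable demonstration of the layout:

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

int main()
{
    std::string chars("ab\0c\0", 5);    // rows "ab" and "c", each zero-terminated
    std::vector<size_t> offsets{3, 5};  // one past each terminating zero

    size_t prev = 0;
    std::vector<std::string> rows;
    for (size_t off : offsets)
    {
        rows.emplace_back(chars.data() + prev, off - prev - 1);  // drop the '\0'
        prev = off;
    }
    assert(rows[0] == "ab" && rows[1] == "c");
}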
const auto & to_pod = assert_cast(cache.to_column.get())->getData(); const auto & table = *cache.table_string_to_idx; if (cache.default_column) { - const auto const_def = reinterpret_cast(cache.default_column.get())->getData()[0]; + const auto const_def = assert_cast(cache.default_column.get())->getData()[0]; executeStringToNumHelper(table, data, offsets, out_pod, to_pod, const_def, size); } else { - const auto & nconst_def = reinterpret_cast(default_non_const.get())->getData(); + const auto & nconst_def = assert_cast(default_non_const.get())->getData(); executeStringToNumHelper(table, data, offsets, out_pod, to_pod, nconst_def, size); } return true; @@ -570,7 +625,7 @@ namespace ColumnString::Offset current_offset = 0; for (size_t i = 0; i < size; ++i) { - const StringRef ref{&data[current_offset], offsets[i] - current_offset}; + const StringRef ref{&data[current_offset], offsets[i] - current_offset - 1}; current_offset = offsets[i]; const auto * it = table.find(ref); if (it) @@ -593,15 +648,18 @@ namespace { using NumToIdx = HashMap>; using StringToIdx = HashMap; + using AnythingToIdx = HashMap; std::unique_ptr table_num_to_idx; std::unique_ptr table_string_to_idx; + std::unique_ptr table_anything_to_idx; - ColumnPtr to_columns; + bool is_empty = false; + + ColumnPtr from_column; + ColumnPtr to_column; ColumnPtr default_column; - Arena string_pool; - std::atomic initialized{false}; std::mutex mutex; }; @@ -609,27 +667,6 @@ namespace mutable Cache cache; - static UInt64 bitCastToUInt64(const Field & x) - { - switch (x.getType()) - { - case Field::Types::UInt64: - return x.get(); - case Field::Types::Int64: - return x.get(); - case Field::Types::Float64: - return std::bit_cast(x.get()); - case Field::Types::Bool: - return x.get(); - case Field::Types::Decimal32: - return x.get>().getValue(); - case Field::Types::Decimal64: - return x.get>().getValue(); - default: - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected type in function 'transform'"); - } - } - static void checkAllowedType(const DataTypePtr & type) { if (type->isNullable()) @@ -656,33 +693,56 @@ namespace /// Can be called from different threads. It works only on the first call. 
void initialize(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const { - const ColumnConst * array_from = checkAndGetColumnConst(arguments[1].column.get()); - const ColumnConst * array_to = checkAndGetColumnConst(arguments[2].column.get()); + if (cache.initialized) + return; + + const DataTypePtr & from_type = arguments[0].type; + + if (from_type->onlyNull()) + { + cache.is_empty = true; + return; + } + + const ColumnArray * array_from = checkAndGetColumnConstData(arguments[1].column.get()); + const ColumnArray * array_to = checkAndGetColumnConstData(arguments[2].column.get()); if (!array_from || !array_to) throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Second and third arguments of function {} must be constant arrays.", getName()); - if (cache.initialized) - return; - - const auto & from = array_from->getValue(); - const size_t size = from.size(); - if (0 == size) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty arrays are illegal in function {}", getName()); - std::lock_guard lock(cache.mutex); - if (cache.initialized) - return; + const ColumnPtr & from_column_uncasted = array_from->getDataPtr(); - const auto & to = array_to->getValue(); - if (size != to.size()) + cache.from_column = castColumn( + { + from_column_uncasted, + typeid_cast(*arguments[1].type).getNestedType(), + arguments[1].name + }, + from_type); + + cache.to_column = castColumn( + { + array_to->getDataPtr(), + typeid_cast(*arguments[2].type).getNestedType(), + arguments[2].name + }, + result_type); + + const size_t size = cache.from_column->size(); + if (0 == size) + { + cache.is_empty = true; + return; + } + + if (cache.to_column->size() != size) throw Exception( ErrorCodes::BAD_ARGUMENTS, "Second and third arguments of function {} must be arrays of same size", getName()); /// Whether the default value is set. - if (arguments.size() == 4) { const IColumn * default_col = arguments[3].column.get(); @@ -702,45 +762,54 @@ namespace /// Note: Doesn't check the duplicates in the `from` array. 
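One subtlety of the initialize() rewrite below: the `from` array is cast to the argument's type, and only elements whose value survives that cast unchanged (checked with FieldVisitorAccurateEquals) are indexed, so lossy entries simply never match. The same rule, shown with a plain narrowing cast:

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

std::unordered_map<int8_t, size_t> buildTableSketch(const std::vector<int64_t> & from)
{
    std::unordered_map<int8_t, size_t> table;
    for (size_t i = 0; i < from.size(); ++i)
    {
        auto casted = static_cast<int8_t>(from[i]);
        if (int64_t(casted) == from[i])  // value survived the cast: index it
            table.emplace(casted, i);
    }
    return table;
}

int main()
{
    auto table = buildTableSketch({1, 1000, -5});  // 1000 does not fit int8 and is skipped
    assert(table.size() == 2 && table.at(1) == 0 && table.at(-5) == 2);
}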
- const IDataType & from_type = *arguments[0].type; - - if (from[0].getType() != Field::Types::String) + WhichDataType which(from_type); + if (isNativeNumber(which) || which.isDecimal32() || which.isDecimal64()) { cache.table_num_to_idx = std::make_unique(); auto & table = *cache.table_num_to_idx; for (size_t i = 0; i < size; ++i) { - Field key = convertFieldToType(from[i], from_type); - if (key.isNull()) - continue; - - /// Field may be of Float type, but for the purpose of bitwise equality we can treat them as UInt64 - table[bitCastToUInt64(key)] = i; + if (applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i])) + { + /// Field may be of Float type, but for the purpose of bitwise equality we can treat them as UInt64 + StringRef ref = cache.from_column->getDataAt(i); + UInt64 key = 0; + memcpy(&key, ref.data, ref.size); + table[key] = i; + } } } - else + else if (from_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion()) { cache.table_string_to_idx = std::make_unique(); auto & table = *cache.table_string_to_idx; for (size_t i = 0; i < size; ++i) { - const String & str_from = from[i].get(); - StringRef ref{cache.string_pool.insert(str_from.data(), str_from.size() + 1), str_from.size() + 1}; - table[ref] = i; + if (applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i])) + { + StringRef ref = cache.from_column->getDataAt(i); + table[ref] = i; + } + } + } + else + { + cache.table_anything_to_idx = std::make_unique(); + auto & table = *cache.table_anything_to_idx; + for (size_t i = 0; i < size; ++i) + { + if (applyVisitor(FieldVisitorAccurateEquals(), (*cache.from_column)[i], (*from_column_uncasted)[i])) + { + SipHash hash; + cache.from_column->updateHashWithValue(i, hash); + table[hash.get128()] = i; + } } } - auto to_columns = result_type->createColumn(); - for (size_t i = 0; i < size; ++i) - { - Field to_value = convertFieldToType(to[i], *result_type); - to_columns->insert(to_value); - } - cache.to_columns = std::move(to_columns); - cache.initialized = true; } -}; + }; } diff --git a/src/IO/ReadHelpers.h b/src/IO/ReadHelpers.h index af8d4942ff1..3c42e4e63b7 100644 --- a/src/IO/ReadHelpers.h +++ b/src/IO/ReadHelpers.h @@ -63,6 +63,7 @@ namespace ErrorCodes extern const int INCORRECT_DATA; extern const int TOO_LARGE_STRING_SIZE; extern const int TOO_LARGE_ARRAY_SIZE; + extern const int SIZE_OF_FIXED_STRING_DOESNT_MATCH; } /// Helper functions for formatted input. 
@@ -138,6 +139,19 @@ inline void readStringBinary(std::string & s, ReadBuffer & buf, size_t max_strin buf.readStrict(s.data(), size); } +/// For historical reasons we store IPv6 as a String +inline void readIPv6Binary(IPv6 & ip, ReadBuffer & buf) +{ + size_t size = 0; + readVarUInt(size, buf); + + if (size != IPV6_BINARY_LENGTH) + throw Exception(ErrorCodes::SIZE_OF_FIXED_STRING_DOESNT_MATCH, + "Size of the string {} doesn't match size of binary IPv6 {}", size, IPV6_BINARY_LENGTH); + + buf.readStrict(reinterpret_cast(&ip.toUnderType()), size); +} + template void readVectorBinary(std::vector & v, ReadBuffer & buf) { diff --git a/src/IO/WriteHelpers.h b/src/IO/WriteHelpers.h index 29eb1355fd8..a1bd6bcebff 100644 --- a/src/IO/WriteHelpers.h +++ b/src/IO/WriteHelpers.h @@ -10,6 +10,7 @@ #include +#include "Common/formatIPv6.h" #include #include #include @@ -105,6 +106,13 @@ inline void writeStringBinary(const std::string & s, WriteBuffer & buf) buf.write(s.data(), s.size()); } +/// For historical reasons we store IPv6 as a String +inline void writeIPv6Binary(const IPv6 & ip, WriteBuffer & buf) +{ + writeVarUInt(IPV6_BINARY_LENGTH, buf); + buf.write(reinterpret_cast(&ip.toUnderType()), IPV6_BINARY_LENGTH); +} + inline void writeStringBinary(StringRef s, WriteBuffer & buf) { writeVarUInt(s.size, buf); diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 28ba2e3e838..cfbe53b5e4d 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -57,6 +57,9 @@ #include #include +#include +#include +#include namespace DB { @@ -422,9 +425,8 @@ Block createBlockForSet( } -SetPtr makeExplicitSet( - const ASTFunction * node, const ActionsDAG & actions, bool create_ordered_set, - ContextPtr context, const SizeLimits & size_limits, PreparedSets & prepared_sets) +FutureSetPtr makeExplicitSet( + const ASTFunction * node, const ActionsDAG & actions, ContextPtr context, PreparedSets & prepared_sets) { const IAST & args = *node->arguments; @@ -443,14 +445,16 @@ SetPtr makeExplicitSet( if (left_tuple_type && left_tuple_type->getElements().size() != 1) set_element_types = left_tuple_type->getElements(); + auto set_element_keys = Set::getElementTypes(set_element_types, context->getSettingsRef().transform_null_in); + + auto set_key = right_arg->getTreeHash(); + if (auto set = prepared_sets.findTuple(set_key, set_element_keys)) + return set; /// Already prepared. + for (auto & element_type : set_element_types) if (const auto * low_cardinality_type = typeid_cast(element_type.get())) element_type = low_cardinality_type->getDictionaryType(); - auto set_key = PreparedSetKey::forLiteral(*right_arg, set_element_types); - if (auto set = prepared_sets.get(set_key)) - return set; /// Already prepared. 
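The two IPv6 helpers above frame the 16 address bytes as a length-prefixed string: writeIPv6Binary emits a varint size (always 16, hence a single byte) followed by the raw bytes, and readIPv6Binary rejects any other size. A standalone sketch of that framing over a plain byte buffer; the buffer type and names are stand-ins for ClickHouse's Read/WriteBuffer:

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <stdexcept>
    #include <vector>

    constexpr size_t IPV6_BINARY_LENGTH = 16;

    void writeIPv6(std::vector<uint8_t> & buf, const std::array<uint8_t, IPV6_BINARY_LENGTH> & ip)
    {
        buf.push_back(IPV6_BINARY_LENGTH);   // varint length; 16 < 128, so it is one byte
        buf.insert(buf.end(), ip.begin(), ip.end());
    }

    std::array<uint8_t, IPV6_BINARY_LENGTH> readIPv6(const std::vector<uint8_t> & buf, size_t & pos)
    {
        size_t size = buf.at(pos++);
        if (size != IPV6_BINARY_LENGTH)
            throw std::runtime_error("size of the string doesn't match size of binary IPv6");
        if (buf.size() - pos < IPV6_BINARY_LENGTH)
            throw std::runtime_error("unexpected end of buffer");
        std::array<uint8_t, IPV6_BINARY_LENGTH> ip{};
        std::memcpy(ip.data(), buf.data() + pos, IPV6_BINARY_LENGTH);
        pos += IPV6_BINARY_LENGTH;
        return ip;
    }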
-
     Block block;
     const auto & right_arg_func = std::dynamic_pointer_cast(right_arg);
     if (right_arg_func && (right_arg_func->name == "tuple" || right_arg_func->name == "array"))
@@ -458,14 +462,7 @@ SetPtr makeExplicitSet(
     else
         block = createBlockForSet(left_arg_type, right_arg, set_element_types, context);

-    SetPtr set
-        = std::make_shared(size_limits, create_ordered_set, context->getSettingsRef().transform_null_in);
-    set->setHeader(block.cloneEmpty().getColumnsWithTypeAndName());
-    set->insertFromBlock(block.getColumnsWithTypeAndName());
-    set->finishInsert();
-
-    prepared_sets.set(set_key, set);
-    return set;
+    return prepared_sets.addFromTuple(set_key, block, context->getSettings());
 }

 class ScopeStack::Index
@@ -950,7 +947,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
         return;
     }

-    FutureSet prepared_set;
+    FutureSetPtr prepared_set;
     if (checkFunctionIsInOrGlobalInOperator(node))
     {
         /// Let's find the type of the first argument (then getActionsImpl will be called again and will not affect anything).
@@ -959,7 +956,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
         if (!data.no_makeset && !(data.is_create_parameterized_view && !analyzeReceiveQueryParams(ast).empty()))
             prepared_set = makeSet(node, data, data.no_subqueries);

-        if (prepared_set.isValid())
+        if (prepared_set)
         {
             /// Transform tuple or subquery into a set.
         }
@@ -1171,14 +1168,14 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
             num_arguments += columns.size() - 1;
             arg += columns.size() - 1;
         }
-        else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set.isValid())
+        else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set)
         {
             ColumnWithTypeAndName column;
             column.type = std::make_shared();

             /// If the argument is a set given by an enumeration of values (so, the set was already built), give it a unique name,
             /// so that sets with the same literal representation do not fuse together (they can have different types).
-            const bool is_constant_set = prepared_set.isCreated();
+            const bool is_constant_set = typeid_cast(prepared_set.get()) == nullptr;
             if (is_constant_set)
                 column.name = data.getUniqueName("__set");
             else
@@ -1381,7 +1378,7 @@ void ActionsMatcher::visit(const ASTLiteral & literal, const ASTPtr & /* ast */,
     data.addColumn(std::move(column));
 }

-FutureSet ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_subqueries)
+FutureSetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_subqueries)
 {
     if (!data.prepared_sets)
         return {};
@@ -1400,13 +1397,34 @@ FutureSet ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no
     {
         if (no_subqueries)
             return {};

-        auto set_key = PreparedSetKey::forSubquery(*right_in_operand);
+        PreparedSets::Hash set_key;
+
+        if (data.getContext()->getSettingsRef().allow_experimental_analyzer && !identifier)
         {
-            auto set = data.prepared_sets->getFuture(set_key);
-            if (set.isValid())
-                return set;
+            /// We can only get here from the mutation interpreter; normal selects with the new analyzer use a different interpreter.
+            /// This is a hacky way to allow reusing the cache for prepared sets.
+            ///
+            /// A mutation is executed in two stages:
+            /// * first, the query 'SELECT count() FROM table WHERE ...'
+            ///   is executed to get the set of affected parts (using the analyzer)
+            /// * second, every part is mutated separately, where the plan is built "manually", using this code as well.
+            /// To share the Set between the first and the second stage, we should use the same hash.
+            /// The new analyzer uses a hash of the query tree, so here we also build a query tree.
+            ///
+            /// Note: this code can be safely removed, but then the test 02581_share_big_sets becomes too slow (and fails by timeout).
+            /// Note: we should use the new analyzer for mutations and remove this hack.
+            InterpreterSelectQueryAnalyzer interpreter(right_in_operand, data.getContext(), SelectQueryOptions().analyze(true).subquery());
+            const auto & query_tree = interpreter.getQueryTree();
+            if (auto * query_node = query_tree->as())
+                query_node->setIsSubquery(true);
+            set_key = query_tree->getTreeHash();
         }
+        else
+            set_key = right_in_operand->getTreeHash();
+
+        if (auto set = data.prepared_sets->findSubquery(set_key))
+            return set;
+
+        FutureSetPtr external_table_set;

        /// A special case is if the name of the table is specified on the right side of the IN statement,
        /// and the table has the type Set (a previously prepared set).
@@ -1417,20 +1435,22 @@ FutureSet ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no
            if (table)
            {
-                StorageSet * storage_set = dynamic_cast(table.get());
-                if (storage_set)
-                {
-                    SetPtr set = storage_set->getSet();
-                    data.prepared_sets->set(set_key, set);
-                    return FutureSet(set);
-                }
+                if (StorageSet * storage_set = dynamic_cast(table.get()))
+                    return data.prepared_sets->addFromStorage(set_key, storage_set->getSet());
+            }
+
+            if (!data.getContext()->isGlobalContext())
+            {
+                /// If we are reading from storage, it can be an external table which is used for GLOBAL IN.
+                /// Here, we take the FutureSet which is used to build the external table.
+                /// It will be used if the set is useful for the primary key. During PK analysis
+                /// the temporary table is not filled yet, so we need to fill it first.
+                if (auto tmp_table = data.getContext()->findExternalTable(identifier->getColumnName()))
+                    external_table_set = tmp_table->future_set;
            }
        }

-        /// We get the stream of blocks for the subquery. Create Set and put it in place of the subquery.
-        String set_id = right_in_operand->getColumnName();
-        bool transform_null_in = data.getContext()->getSettingsRef().transform_null_in;
-        SubqueryForSet & subquery_for_set = data.prepared_sets->createOrGetSubquery(set_id, set_key, data.set_size_limit, transform_null_in);
+        std::unique_ptr source = std::make_unique();

        /** The following happens for GLOBAL INs or INs:
          * - in the addExternalStorage function, the IN (SELECT ...) subquery is replaced with IN _data1,
          ...
          * In case that we have HAVING with IN subquery, we have to force creating set for it.
          * Also it doesn't make sense if it is GLOBAL IN or ordinary IN.
*/ - if (!subquery_for_set.hasSource()) { auto interpreter = interpretSubquery(right_in_operand, data.getContext(), data.subquery_depth, {}); - subquery_for_set.createSource(*interpreter); + interpreter->buildQueryPlan(*source); } - return subquery_for_set.set; + return data.prepared_sets->addFromSubquery(set_key, std::move(source), nullptr, std::move(external_table_set), data.getContext()->getSettingsRef()); } else { @@ -1454,8 +1473,7 @@ FutureSet ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no const auto & index = data.actions_stack.getLastActionsIndex(); if (data.prepared_sets && index.contains(left_in_operand->getColumnName())) /// An explicit enumeration of values in parentheses. - return FutureSet( - makeExplicitSet(&node, last_actions, false, data.getContext(), data.set_size_limit, *data.prepared_sets)); + return makeExplicitSet(&node, last_actions, data.getContext(), *data.prepared_sets); else return {}; } diff --git a/src/Interpreters/ActionsVisitor.h b/src/Interpreters/ActionsVisitor.h index 3cf43746630..1e2ebaf6d87 100644 --- a/src/Interpreters/ActionsVisitor.h +++ b/src/Interpreters/ActionsVisitor.h @@ -25,9 +25,8 @@ class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; /// The case of an explicit enumeration of values. -SetPtr makeExplicitSet( - const ASTFunction * node, const ActionsDAG & actions, bool create_ordered_set, - ContextPtr context, const SizeLimits & limits, PreparedSets & prepared_sets); +FutureSetPtr makeExplicitSet( + const ASTFunction * node, const ActionsDAG & actions, ContextPtr context, PreparedSets & prepared_sets); /** For ActionsVisitor * A stack of ExpressionActions corresponding to nested lambda expressions. @@ -217,7 +216,7 @@ private: static void visit(const ASTLiteral & literal, const ASTPtr & ast, Data & data); static void visit(ASTExpressionList & expression_list, const ASTPtr & ast, Data & data); - static FutureSet makeSet(const ASTFunction & node, Data & data, bool no_subqueries); + static FutureSetPtr makeSet(const ASTFunction & node, Data & data, bool no_subqueries); static ASTs doUntuple(const ASTFunction * function, ActionsMatcher::Data & data); static std::optional getNameAndTypeFromAST(const ASTPtr & ast, Data & data); }; diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index b9e189747ac..eb5b59a447d 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -498,12 +498,14 @@ KeyMetadata::iterator FileCache::addFileSegment( chassert(size > 0); /// Empty file segments in cache are not allowed. 
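The FileCache::addFileSegment hunk that begins here replaces the old exact-offset duplicate check with a range-intersection check, backed by the LockedKey::hasIntersectingRange helper added in the Metadata.cpp hunk further below. Because segments are kept in a map ordered by their left offset, only two neighbours of a new range can possibly intersect it. A standalone analogue of that lookup; Range and the map layout are simplified stand-ins:

    #include <cstddef>
    #include <map>
    #include <optional>

    struct Range { size_t left; size_t right; };   // inclusive bounds, as in FileSegment::Range

    // Mirrors the new FileSegment::Range::operator<: two ranges compare
    // "less" only when they are fully disjoint.
    bool disjointBefore(const Range & a, const Range & b) { return a.right < b.left; }

    std::optional<Range> intersectingRange(const std::map<size_t, Range> & segments, const Range & range)
    {
        if (segments.empty())
            return std::nullopt;

        auto it = segments.lower_bound(range.left);   // first segment starting at or after range.left
        if (it != segments.end())
        {
            if (!disjointBefore(range, it->second))
                return it->second;                    // the next neighbour intersects
            if (it == segments.begin())
                return std::nullopt;                  // no predecessor to check
        }

        const Range & prev = std::prev(it)->second;   // the only other candidate
        if (!disjointBefore(prev, range))
            return prev;
        return std::nullopt;
    }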
const auto & key = locked_key.getKey(); - if (locked_key.tryGetByOffset(offset)) + const FileSegment::Range range(offset, offset + size - 1); + + if (auto intersecting_range = locked_key.hasIntersectingRange(range)) { throw Exception( ErrorCodes::LOGICAL_ERROR, - "Cache entry already exists for key: `{}`, offset: {}, size: {}.", - key, offset, size); + "Attempt to add intersecting file segment in cache ({} intersects {})", + range.toString(), intersecting_range->toString()); } FileSegment::State result_state; @@ -958,8 +960,20 @@ void FileCache::loadMetadata() if ((main_priority->getSizeLimit() == 0 || main_priority->getSize(lock) + size <= main_priority->getSizeLimit()) && (main_priority->getElementsLimit() == 0 || main_priority->getElementsCount(lock) + 1 <= main_priority->getElementsLimit())) { - auto file_segment_metadata_it = addFileSegment( - *locked_key, offset, size, FileSegment::State::DOWNLOADED, CreateFileSegmentSettings(segment_kind), &lock); + KeyMetadata::iterator file_segment_metadata_it; + try + { + file_segment_metadata_it = addFileSegment( + *locked_key, offset, size, FileSegment::State::DOWNLOADED, CreateFileSegmentSettings(segment_kind), &lock); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + chassert(false); + + fs::remove(offset_it->path()); + continue; + } const auto & file_segment_metadata = file_segment_metadata_it->second; chassert(file_segment_metadata->file_segment->assertCorrectness()); diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 062f168a046..c583ba4c90c 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -129,6 +129,8 @@ public: bool operator==(const Range & other) const { return left == other.left && right == other.right; } + bool operator<(const Range & other) const { return right < other.left; } + size_t size() const { return right - left + 1; } String toString() const { return fmt::format("[{}, {}]", std::to_string(left), std::to_string(right)); } diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 77b9deb7846..8c8524f7fa7 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -448,6 +448,29 @@ void LockedKey::shrinkFileSegmentToDownloadedSize( chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); } +std::optional LockedKey::hasIntersectingRange(const FileSegment::Range & range) const +{ + if (key_metadata->empty()) + return {}; + + auto it = key_metadata->lower_bound(range.left); + if (it != key_metadata->end()) /// has next range + { + auto next_range = it->second->file_segment->range(); + if (!(range < next_range)) + return next_range; + + if (it == key_metadata->begin()) + return {}; + } + + auto prev_range = std::prev(it)->second->file_segment->range(); + if (!(prev_range < range)) + return prev_range; + + return {}; +} + std::shared_ptr LockedKey::getByOffset(size_t offset) const { auto it = key_metadata->find(offset); diff --git a/src/Interpreters/Cache/Metadata.h b/src/Interpreters/Cache/Metadata.h index 635594a289a..8ee40aa977f 100644 --- a/src/Interpreters/Cache/Metadata.h +++ b/src/Interpreters/Cache/Metadata.h @@ -164,6 +164,8 @@ struct LockedKey : private boost::noncopyable bool isLastOwnerOfFileSegment(size_t offset) const; + std::optional hasIntersectingRange(const FileSegment::Range & range) const; + void removeFromCleanupQueue(); void markAsRemoved(); diff --git a/src/Interpreters/Cache/QueryCache.cpp 
b/src/Interpreters/Cache/QueryCache.cpp index 74a1c3c9b18..f46a10ca51d 100644 --- a/src/Interpreters/Cache/QueryCache.cpp +++ b/src/Interpreters/Cache/QueryCache.cpp @@ -132,6 +132,11 @@ QueryCache::Key::Key( { } +QueryCache::Key::Key(ASTPtr ast_, const String & user_name_) + : QueryCache::Key(ast_, {}, user_name_, false, std::chrono::system_clock::from_time_t(1), false) /// dummy values for everything != AST or user name +{ +} + bool QueryCache::Key::operator==(const Key & other) const { return ast->getTreeHash() == other.ast->getTreeHash(); @@ -387,19 +392,22 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar return; } - if (!entry->key.is_shared && entry->key.user_name != key.user_name) + const auto & entry_key = entry->key; + const auto & entry_mapped = entry->mapped; + + if (!entry_key.is_shared && entry_key.user_name != key.user_name) { LOG_TRACE(&Poco::Logger::get("QueryCache"), "Inaccessible entry found for query {}", key.queryStringFromAst()); return; } - if (IsStale()(entry->key)) + if (IsStale()(entry_key)) { LOG_TRACE(&Poco::Logger::get("QueryCache"), "Stale entry found for query {}", key.queryStringFromAst()); return; } - if (!entry->key.is_compressed) + if (!entry_key.is_compressed) { // Cloning chunks isn't exactly great. It could be avoided by another indirection, i.e. wrapping Entry's members chunks, totals and // extremes into shared_ptrs and assuming that the lifecycle of these shared_ptrs coincides with the lifecycle of the Entry @@ -408,15 +416,15 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar // optimization. Chunks cloned_chunks; - for (const auto & chunk : entry->mapped->chunks) + for (const auto & chunk : entry_mapped->chunks) cloned_chunks.push_back(chunk.clone()); - buildSourceFromChunks(entry->key.header, std::move(cloned_chunks), entry->mapped->totals, entry->mapped->extremes); + buildSourceFromChunks(entry_key.header, std::move(cloned_chunks), entry_mapped->totals, entry_mapped->extremes); } else { Chunks decompressed_chunks; - const Chunks & chunks = entry->mapped->chunks; + const Chunks & chunks = entry_mapped->chunks; for (const auto & chunk : chunks) { const Columns & columns = chunk.getColumns(); @@ -430,7 +438,7 @@ QueryCache::Reader::Reader(Cache & cache_, const Key & key, const std::lock_guar decompressed_chunks.push_back(std::move(decompressed_chunk)); } - buildSourceFromChunks(entry->key.header, std::move(decompressed_chunks), entry->mapped->totals, entry->mapped->extremes); + buildSourceFromChunks(entry_key.header, std::move(decompressed_chunks), entry_mapped->totals, entry_mapped->extremes); } LOG_TRACE(&Poco::Logger::get("QueryCache"), "Entry found for query {}", key.queryStringFromAst()); diff --git a/src/Interpreters/Cache/QueryCache.h b/src/Interpreters/Cache/QueryCache.h index bafa78f13d5..6ef7cc60918 100644 --- a/src/Interpreters/Cache/QueryCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -49,7 +49,7 @@ public: /// If the associated entry can be read by other users. In general, sharing is a bad idea: First, it is unlikely that different /// users pose the same queries. Second, sharing potentially breaches security. E.g. User A should not be able to bypass row /// policies on some table by running the same queries as user B for whom no row policies exist. - bool is_shared; + const bool is_shared; /// When does the entry expire? 
const std::chrono::time_point expires_at; @@ -58,12 +58,16 @@ public: /// (we could theoretically apply compression also to the totals and extremes but it's an obscure use case) const bool is_compressed; + /// Ctor to construct a Key for writing into query cache. Key(ASTPtr ast_, Block header_, const String & user_name_, bool is_shared_, std::chrono::time_point expires_at_, bool is_compressed); + /// Ctor to construct a Key for reading from query cache (this operation only needs the AST + user name). + Key(ASTPtr ast_, const String & user_name_); + bool operator==(const Key & other) const; String queryStringFromAst() const; }; diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index f1a8b3e0984..030c0b77dd5 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -26,6 +26,8 @@ using QueryPlanPtr = std::unique_ptr; struct StorageID; +class PreparedSets; +using PreparedSetsPtr = std::shared_ptr; namespace ClusterProxy { diff --git a/src/Interpreters/ConcurrentHashJoin.cpp b/src/Interpreters/ConcurrentHashJoin.cpp index 03c173a73d9..fc24f0ae029 100644 --- a/src/Interpreters/ConcurrentHashJoin.cpp +++ b/src/Interpreters/ConcurrentHashJoin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 106264320b2..7482450d529 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1319,6 +1319,21 @@ void Context::addExternalTable(const String & table_name, TemporaryTableHolder & external_tables_mapping.emplace(table_name, std::make_shared(std::move(temporary_table))); } +std::shared_ptr Context::findExternalTable(const String & table_name) const +{ + if (isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot have external tables"); + + std::shared_ptr holder; + { + auto lock = getLock(); + auto iter = external_tables_mapping.find(table_name); + if (iter == external_tables_mapping.end()) + return {}; + holder = iter->second; + } + return holder; +} std::shared_ptr Context::removeExternalTable(const String & table_name) { diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 194da016ee7..82d5d713d33 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -615,6 +615,7 @@ public: Tables getExternalTables() const; void addExternalTable(const String & table_name, TemporaryTableHolder && temporary_table); + std::shared_ptr findExternalTable(const String & table_name) const; std::shared_ptr removeExternalTable(const String & table_name); const Scalars & getScalars() const; diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index 8d3fa91a7fe..4cb2f6e3b3d 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -110,7 +110,7 @@ TemporaryTableHolder::TemporaryTableHolder( } TemporaryTableHolder::TemporaryTableHolder(TemporaryTableHolder && rhs) noexcept - : WithContext(rhs.context), temporary_tables(rhs.temporary_tables), id(rhs.id) + : WithContext(rhs.context), temporary_tables(rhs.temporary_tables), id(rhs.id), future_set(std::move(rhs.future_set)) { rhs.id = UUIDHelpers::Nil; } @@ -216,8 +216,24 @@ void DatabaseCatalog::shutdownImpl() /// We still hold "databases" (instead of std::move) for Buffer tables to flush data correctly. + /// Delay shutdown of temporary and system databases. 
They will be shut down last.
+    /// Because some databases might use them until their shutdown is called, and calling shutdown
+    /// on the temporary database means clearing its set of tables, which would lead to unnecessary errors like "table not found".
+    std::vector databases_with_delayed_shutdown;
     for (auto & database : current_databases)
+    {
+        if (database.first == TEMPORARY_DATABASE || database.first == SYSTEM_DATABASE)
+        {
+            databases_with_delayed_shutdown.push_back(database.second);
+            continue;
+        }
         database.second->shutdown();
+    }
+
+    for (auto & database : databases_with_delayed_shutdown)
+    {
+        database->shutdown();
+    }

     {
         std::lock_guard lock(tables_marked_dropped_mutex);
diff --git a/src/Interpreters/DatabaseCatalog.h b/src/Interpreters/DatabaseCatalog.h
index 51e9fbdb936..258ea2dee7c 100644
--- a/src/Interpreters/DatabaseCatalog.h
+++ b/src/Interpreters/DatabaseCatalog.h
@@ -79,6 +79,8 @@ private:
 using DDLGuardPtr = std::unique_ptr;

+class FutureSet;
+using FutureSetPtr = std::shared_ptr;

 /// Creates temporary table in `_temporary_and_external_tables` with randomly generated unique StorageID.
 /// Such table can be accessed from everywhere by its ID.
@@ -111,6 +113,7 @@ struct TemporaryTableHolder : boost::noncopyable, WithContext
     IDatabase * temporary_tables = nullptr;
     UUID id = UUIDHelpers::Nil;
+    FutureSetPtr future_set;
 };

 ///TODO maybe remove shared_ptr from here?
diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp
index 36725f36804..f1c577948eb 100644
--- a/src/Interpreters/ExpressionActions.cpp
+++ b/src/Interpreters/ExpressionActions.cpp
@@ -936,15 +936,12 @@ bool ExpressionActions::checkColumnIsAlwaysFalse(const String & column_name) con
     for (const auto & action : actions)
     {
         if (action.node->type == ActionsDAG::ActionType::COLUMN && action.node->result_name == set_to_check)
-        {
             // Constant ColumnSet cannot be empty, so we only need to check non-constant ones.
             if (const auto * column_set = checkAndGetColumn(action.node->column.get()))
-            {
-                auto set = column_set->getData();
-                if (set && set->isCreated() && set->getTotalRowCount() == 0)
-                    return true;
-            }
-        }
+                if (auto future_set = column_set->getData())
+                    if (auto set = future_set->get())
+                        if (set->getTotalRowCount() == 0)
+                            return true;
     }
 }

diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp
index 307b46b3a0b..f27d23e8e94 100644
--- a/src/Interpreters/ExpressionAnalyzer.cpp
+++ b/src/Interpreters/ExpressionAnalyzer.cpp
@@ -450,77 +450,6 @@ void ExpressionAnalyzer::initGlobalSubqueriesAndExternalTables(bool do_global, b
 }

-void ExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr & subquery_or_table_name, const SelectQueryOptions & query_options)
-{
-    if (!prepared_sets)
-        return;
-
-    auto set_key = PreparedSetKey::forSubquery(*subquery_or_table_name);
-
-    if (prepared_sets->getFuture(set_key).isValid())
-        return; /// Already prepared.
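The delayed-shutdown loop in the DatabaseCatalog hunk above is a two-pass pattern: defer the entries that others may still depend on, shut everything else down first, then finish with the deferred ones. A standalone sketch of the pattern; the literal database names are an assumption here, since ClickHouse refers to them via the TEMPORARY_DATABASE and SYSTEM_DATABASE constants:

    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    using NamedShutdown = std::pair<std::string, std::function<void()>>;

    void shutdownAll(std::vector<NamedShutdown> & databases)
    {
        std::vector<std::function<void()>> delayed;
        for (auto & [name, shutdown] : databases)
        {
            // Assumed literal values of TEMPORARY_DATABASE / SYSTEM_DATABASE.
            if (name == "_temporary_and_external_tables" || name == "system")
            {
                delayed.push_back(shutdown);   // others may still use these
                continue;
            }
            shutdown();
        }
        for (auto & shutdown : delayed)        // temporary/system go last
            shutdown();
    }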
- - if (auto set_ptr_from_storage_set = isPlainStorageSetInSubquery(subquery_or_table_name)) - { - prepared_sets->set(set_key, set_ptr_from_storage_set); - return; - } - - auto build_set = [&] () -> SetPtr - { - LOG_TRACE(getLogger(), "Building set, key: {}", set_key.toString()); - - auto interpreter_subquery = interpretSubquery(subquery_or_table_name, getContext(), {}, query_options); - auto io = interpreter_subquery->execute(); - PullingAsyncPipelineExecutor executor(io.pipeline); - - SetPtr set = std::make_shared(settings.size_limits_for_set_used_with_index, true, getContext()->getSettingsRef().transform_null_in); - set->setHeader(executor.getHeader().getColumnsWithTypeAndName()); - - Block block; - while (executor.pull(block)) - { - if (block.rows() == 0) - continue; - - /// If the limits have been exceeded, give up and let the default subquery processing actions take place. - if (!set->insertFromBlock(block.getColumnsWithTypeAndName())) - return nullptr; - } - - set->finishInsert(); - - return set; - }; - - SetPtr set; - - auto set_cache = getContext()->getPreparedSetsCache(); - if (set_cache) - { - auto from_cache = set_cache->findOrPromiseToBuild(set_key.toString()); - if (from_cache.index() == 0) - { - set = build_set(); - std::get<0>(from_cache).set_value(set); - } - else - { - LOG_TRACE(getLogger(), "Waiting for set, key: {}", set_key.toString()); - set = std::get<1>(from_cache).get(); - } - } - else - { - set = build_set(); - } - - if (!set) - return; - - prepared_sets->set(set_key, std::move(set)); -} - SetPtr ExpressionAnalyzer::isPlainStorageSetInSubquery(const ASTPtr & subquery_or_table_name) { const auto * table = subquery_or_table_name->as(); @@ -534,54 +463,6 @@ SetPtr ExpressionAnalyzer::isPlainStorageSetInSubquery(const ASTPtr & subquery_o return storage_set->getSet(); } - -/// Performance optimization for IN() if storage supports it. -void SelectQueryExpressionAnalyzer::makeSetsForIndex(const ASTPtr & node) -{ - if (!node || !storage() || !storage()->supportsIndexForIn()) - return; - - for (auto & child : node->children) - { - /// Don't descend into subqueries. 
- if (child->as()) - continue; - - /// Don't descend into lambda functions - const auto * func = child->as(); - if (func && func->name == "lambda") - continue; - - makeSetsForIndex(child); - } - - const auto * func = node->as(); - if (func && functionIsInOrGlobalInOperator(func->name)) - { - const IAST & args = *func->arguments; - const ASTPtr & left_in_operand = args.children.at(0); - - if (storage()->mayBenefitFromIndexForIn(left_in_operand, getContext(), metadata_snapshot)) - { - const ASTPtr & arg = args.children.at(1); - if (arg->as() || arg->as()) - { - if (settings.use_index_for_in_with_subqueries) - tryMakeSetForIndexFromSubquery(arg, query_options); - } - else - { - auto temp_actions = std::make_shared(columns_after_join); - getRootActions(left_in_operand, true, temp_actions); - - if (prepared_sets && temp_actions->tryFindInOutputs(left_in_operand->getColumnName())) - makeExplicitSet(func, *temp_actions, true, getContext(), settings.size_limits_for_set, *prepared_sets); - } - } - } -} - - void ExpressionAnalyzer::getRootActions(const ASTPtr & ast, bool no_makeset_for_subqueries, ActionsDAGPtr & actions, bool only_consts) { LogAST log; diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index 00cd353aa66..271c3943afc 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -141,11 +141,6 @@ public: void makeWindowDescriptionFromAST(const Context & context, const WindowDescriptions & existing_descriptions, WindowDescription & desc, const IAST * ast); void makeWindowDescriptions(ActionsDAGPtr actions); - /** Create Set from a subquery or a table expression in the query. The created set is suitable for using the index. - * The set will not be created if its size hits the limit. - */ - void tryMakeSetForIndexFromSubquery(const ASTPtr & subquery_or_table_name, const SelectQueryOptions & query_options = {}); - /** Checks if subquery is not a plain StorageSet. * Because while making set we will read data from StorageSet which is not allowed. * Returns valid SetPtr from StorageSet if the latter is used after IN or nullptr otherwise. @@ -363,9 +358,6 @@ public: /// Deletes all columns except mentioned by SELECT, arranges the remaining columns and renames them to aliases. ActionsDAGPtr appendProjectResult(ExpressionActionsChain & chain) const; - /// Create Set-s that we make from IN section to use index on them. - void makeSetsForIndex(const ASTPtr & node); - private: StorageMetadataPtr metadata_snapshot; /// If non-empty, ignore all expressions not from this list. diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index f5b837fc7f7..5b633fee9b6 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -31,6 +31,7 @@ namespace DB namespace ErrorCodes { extern const int WRONG_GLOBAL_SUBQUERY; + extern const int LOGICAL_ERROR; } class GlobalSubqueriesMatcher @@ -161,30 +162,20 @@ public: nullptr, /*create_for_global_subquery*/ true); StoragePtr external_storage = external_storage_holder->getTable(); - external_tables.emplace(external_table_name, external_storage_holder); - /// We need to materialize external tables immediately because reading from distributed - /// tables might generate local plans which can refer to external tables during index - /// analysis. It's too late to populate the external table via CreatingSetsTransform. 
- if (is_explain) + auto set_key = database_and_table_name->getTreeHash(); + + if (!prepared_sets->findSubquery(set_key)) { - /// Do not materialize external tables if it's explain statement. - } - else if (getContext()->getSettingsRef().use_index_for_in_with_subqueries) - { - auto external_table = external_storage_holder->getTable(); - auto table_out = external_table->write({}, external_table->getInMemoryMetadataPtr(), getContext(), /*async_insert=*/false); - auto io = interpreter->execute(); - io.pipeline.complete(std::move(table_out)); - CompletedPipelineExecutor executor(io.pipeline); - executor.execute(); + std::unique_ptr source = std::make_unique(); + interpreter->buildQueryPlan(*source); + + auto future_set = prepared_sets->addFromSubquery(set_key, std::move(source), std::move(external_storage), nullptr, getContext()->getSettingsRef()); + external_storage_holder->future_set = std::move(future_set); } else - { - auto & subquery_for_set = prepared_sets->getSubquery(external_table_name); - subquery_for_set.createSource(*interpreter, external_storage); - } + throw Exception(ErrorCodes::LOGICAL_ERROR, "Set is already created for GLOBAL IN"); /** NOTE If it was written IN tmp_table - the existing temporary (but not external) table, * then a new temporary table will be created (for example, _data1), diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 1f95b1ebf9f..6ea15312ec4 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -953,10 +953,7 @@ Block InterpreterSelectQuery::getSampleBlockImpl() if (storage && !options.only_analyze) { - query_analyzer->makeSetsForIndex(select_query.where()); - query_analyzer->makeSetsForIndex(select_query.prewhere()); query_info.prepared_sets = query_analyzer->getPreparedSets(); - from_stage = storage->getQueryProcessingStage(context, options.to_stage, storage_snapshot, query_info); } @@ -3151,7 +3148,17 @@ void InterpreterSelectQuery::executeExtremes(QueryPlan & query_plan) void InterpreterSelectQuery::executeSubqueriesInSetsAndJoins(QueryPlan & query_plan) { - addCreatingSetsStep(query_plan, prepared_sets, context); + auto subqueries = prepared_sets->getSubqueries(); + + if (!subqueries.empty()) + { + auto step = std::make_unique( + query_plan.getCurrentDataStream(), + std::move(subqueries), + context); + + query_plan.addStep(std::move(step)); + } } diff --git a/src/Interpreters/InterpreterSelectQueryAnalyzer.h b/src/Interpreters/InterpreterSelectQueryAnalyzer.h index 1e0ac737536..4434fabe746 100644 --- a/src/Interpreters/InterpreterSelectQueryAnalyzer.h +++ b/src/Interpreters/InterpreterSelectQueryAnalyzer.h @@ -69,6 +69,8 @@ public: const Planner & getPlanner() const { return planner; } Planner & getPlanner() { return planner; } + const QueryTreeNodePtr & getQueryTree() const { return query_tree; } + private: ASTPtr query; ContextMutablePtr context; diff --git a/src/Interpreters/MergeTreeTransaction.cpp b/src/Interpreters/MergeTreeTransaction.cpp index 1358e3ed3c2..6b8e09a64f5 100644 --- a/src/Interpreters/MergeTreeTransaction.cpp +++ b/src/Interpreters/MergeTreeTransaction.cpp @@ -326,6 +326,8 @@ void MergeTreeTransaction::afterFinalize() is_read_only = storages.empty(); /// Release shared pointers just in case + creating_parts.clear(); + removing_parts.clear(); storages.clear(); mutations.clear(); finalized = true; diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp 
b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp deleted file mode 100644 index a377bb4bba6..00000000000 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - -/** Given a monotonic non-decreasing function f(x), which satisfies f(x) = c for any value x within [b, e). - * We could convert it into its equivalent form, x >= b AND x < e, which is free from the invocation of the function. - * And we could apply the similar transformation to other comparisons. The suggested transformations list: - * - * f(x) == c -> x >= b AND x < e - * f(x) != c -> x < b OR x >= e - * f(x) > c -> x >= e - * f(x) >= c -> x >= b - * f(x) < c -> x < b - * f(x) <= c -> x < e - * - * This function generates a new AST with the transformed relation. - */ -ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTypePair & column, const std::pair& range) -{ - const DateLUTImpl & date_lut = DateLUT::instance(); - - const String & column_name = column.name; - String start_date_or_date_time; - String end_date_or_date_time; - - if (isDateOrDate32(column.type.get())) - { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); - } - else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) - { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); - } - else [[unlikely]] return {}; - - if (comparator == "equals") - { - return makeASTFunction("and", - makeASTFunction("greaterOrEquals", - std::make_shared(column_name), - std::make_shared(start_date_or_date_time) - ), - makeASTFunction("less", - std::make_shared(column_name), - std::make_shared(end_date_or_date_time) - ) - ); - } - else if (comparator == "notEquals") - { - return makeASTFunction("or", - makeASTFunction("less", - std::make_shared(column_name), - std::make_shared(start_date_or_date_time) - ), - makeASTFunction("greaterOrEquals", - std::make_shared(column_name), - std::make_shared(end_date_or_date_time) - ) - ); - } - else if (comparator == "greater") - { - return makeASTFunction("greaterOrEquals", - std::make_shared(column_name), - std::make_shared(end_date_or_date_time) - ); - } - else if (comparator == "lessOrEquals") - { - return makeASTFunction("less", - std::make_shared(column_name), - std::make_shared(end_date_or_date_time) - ); - } - else if (comparator == "less" || comparator == "greaterOrEquals") - { - return makeASTFunction(comparator, - std::make_shared(column_name), - std::make_shared(start_date_or_date_time) - ); - } - else [[unlikely]] - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected equals, notEquals, less, lessOrEquals, greater, greaterOrEquals. 
Actual {}", - comparator); - } -} - -void OptimizeDateOrDateTimeConverterWithPreimageMatcher::visit(const ASTFunction & function, ASTPtr & ast, const Data & data) -{ - const static std::unordered_map swap_relations = { - {"equals", "equals"}, - {"notEquals", "notEquals"}, - {"less", "greater"}, - {"greater", "less"}, - {"lessOrEquals", "greaterOrEquals"}, - {"greaterOrEquals", "lessOrEquals"}, - }; - - if (!swap_relations.contains(function.name)) return; - - if (!function.arguments || function.arguments->children.size() != 2) return; - - size_t func_id = function.arguments->children.size(); - - for (size_t i = 0; i < function.arguments->children.size(); i++) - { - if (const auto * func = function.arguments->children[i]->as()) - { - func_id = i; - } - } - - if (func_id == function.arguments->children.size()) return; - - size_t literal_id = 1 - func_id; - const auto * literal = function.arguments->children[literal_id]->as(); - - if (!literal || literal->value.getType() != Field::Types::UInt64) return; - - String comparator = literal_id > func_id ? function.name : swap_relations.at(function.name); - - const auto * ast_func = function.arguments->children[func_id]->as(); - /// Currently we only handle single-argument functions. - if (!ast_func || !ast_func->arguments || ast_func->arguments->children.size() != 1) return; - - const auto * column_id = ast_func->arguments->children.at(0)->as(); - if (!column_id) return; - - auto pos = IdentifierSemantic::getMembership(*column_id); - if (!pos) - pos = IdentifierSemantic::chooseTableColumnMatch(*column_id, data.tables, true); - if (!pos) - return; - - if (*pos >= data.tables.size()) - return; - - auto data_type_and_name = data.tables[*pos].columns.tryGetByName(column_id->shortName()); - if (!data_type_and_name) return; - - const auto & converter = FunctionFactory::instance().tryGet(ast_func->name, data.context); - if (!converter) return; - - ColumnsWithTypeAndName args; - args.emplace_back(data_type_and_name->type, "tmp"); - auto converter_base = converter->build(args); - if (!converter_base || !converter_base->hasInformationAboutPreimage()) return; - - auto preimage_range = converter_base->getPreimage(*(data_type_and_name->type), literal->value); - if (!preimage_range) return; - - const auto new_ast = generateOptimizedDateFilterAST(comparator, *data_type_and_name, *preimage_range); - if (!new_ast) return; - - ast = new_ast; -} - -bool OptimizeDateOrDateTimeConverterWithPreimageMatcher::needChildVisit(ASTPtr & ast, ASTPtr & /*child*/) -{ - const static std::unordered_set relations = { - "equals", - "notEquals", - "less", - "greater", - "lessOrEquals", - "greaterOrEquals", - }; - - if (const auto * ast_function = ast->as()) - { - return !relations.contains(ast_function->name); - } - - return true; -} - -} diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h deleted file mode 100644 index 778fa462364..00000000000 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.h +++ /dev/null @@ -1,37 +0,0 @@ -#pragma once - -#include -#include - -namespace DB -{ - -class ASTFunction; - -/** Replace predicate having Date/DateTime converters with their preimages to improve performance. - * Given a Date column c, toYear(c) = 2023 -> c >= '2023-01-01' AND c < '2024-01-01' - * Or if c is a DateTime column, toYear(c) = 2023 -> c >= '2023-01-01 00:00:00' AND c < '2024-01-01 00:00:00'. - * The similar optimization also applies to other converters. 
- */ -class OptimizeDateOrDateTimeConverterWithPreimageMatcher -{ -public: - struct Data - { - const TablesWithColumns & tables; - ContextPtr context; - }; - - static void visit(ASTPtr & ast, Data & data) - { - if (const auto * ast_function = ast->as()) - visit(*ast_function, ast, data); - } - - static void visit(const ASTFunction & function, ASTPtr & ast, const Data & data); - - static bool needChildVisit(ASTPtr & ast, ASTPtr & child); -}; - -using OptimizeDateOrDateTimeConverterWithPreimageVisitor = InDepthNodeVisitor; -} diff --git a/src/Interpreters/PreparedSets.cpp b/src/Interpreters/PreparedSets.cpp index 7b0efddae87..67822ecf440 100644 --- a/src/Interpreters/PreparedSets.cpp +++ b/src/Interpreters/PreparedSets.cpp @@ -1,55 +1,213 @@ #include #include #include -#include -#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace DB { -PreparedSetKey PreparedSetKey::forLiteral(const IAST & ast, DataTypes types_) +namespace ErrorCodes { - /// Remove LowCardinality types from type list because Set doesn't support LowCardinality keys now, - /// just converts LowCardinality to ordinary types. - for (auto & type : types_) - type = recursiveRemoveLowCardinality(type); - - PreparedSetKey key; - key.ast_hash = ast.getTreeHash(); - key.types = std::move(types_); - return key; + extern const int LOGICAL_ERROR; } -PreparedSetKey PreparedSetKey::forSubquery(const IAST & ast) +static SizeLimits getSizeLimitsForSet(const Settings & settings) { - PreparedSetKey key; - key.ast_hash = ast.getTreeHash(); - return key; + return SizeLimits(settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode); } -bool PreparedSetKey::operator==(const PreparedSetKey & other) const +static bool equals(const DataTypes & lhs, const DataTypes & rhs) { - if (ast_hash != other.ast_hash) + size_t size = lhs.size(); + if (size != rhs.size()) return false; - if (types.size() != other.types.size()) - return false; - - for (size_t i = 0; i < types.size(); ++i) + for (size_t i = 0; i < size; ++i) { - if (!types[i]->equals(*other.types[i])) + if (!lhs[i]->equals(*rhs[i])) return false; } return true; } -String PreparedSetKey::toString() const + +FutureSetFromStorage::FutureSetFromStorage(SetPtr set_) : set(std::move(set_)) {} +SetPtr FutureSetFromStorage::get() const { return set; } +const DataTypes & FutureSetFromStorage::getTypes() const { return set->getElementsTypes(); } + +SetPtr FutureSetFromStorage::buildOrderedSetInplace(const ContextPtr &) +{ + return set->hasExplicitSetElements() ? 
set : nullptr; +} + + +FutureSetFromTuple::FutureSetFromTuple(Block block, const Settings & settings) +{ + auto size_limits = getSizeLimitsForSet(settings); + set = std::make_shared(size_limits, settings.use_index_for_in_with_subqueries_max_values, settings.transform_null_in); + set->setHeader(block.cloneEmpty().getColumnsWithTypeAndName()); + + Columns columns; + columns.reserve(block.columns()); + for (const auto & column : block) + columns.emplace_back(column.column); + + set_key_columns.filter = ColumnUInt8::create(block.rows()); + + set->insertFromColumns(columns, set_key_columns); + set->finishInsert(); +} + +const DataTypes & FutureSetFromTuple::getTypes() const { return set->getElementsTypes(); } + +SetPtr FutureSetFromTuple::buildOrderedSetInplace(const ContextPtr & context) +{ + if (set->hasExplicitSetElements()) + return set; + + const auto & settings = context->getSettingsRef(); + size_t max_values = settings.use_index_for_in_with_subqueries_max_values; + bool too_many_values = max_values && max_values < set->getTotalRowCount(); + if (!too_many_values) + { + set->fillSetElements(); + set->appendSetElements(set_key_columns); + } + + return set; +} + + +FutureSetFromSubquery::FutureSetFromSubquery( + String key, + std::unique_ptr source_, + StoragePtr external_table_, + FutureSetPtr external_table_set_, + const Settings & settings) + : external_table(std::move(external_table_)) + , external_table_set(std::move(external_table_set_)) + , source(std::move(source_)) +{ + set_and_key = std::make_shared(); + set_and_key->key = std::move(key); + + auto size_limits = getSizeLimitsForSet(settings); + set_and_key->set = std::make_shared(size_limits, settings.use_index_for_in_with_subqueries_max_values, settings.transform_null_in); + set_and_key->set->setHeader(source->getCurrentDataStream().header.getColumnsWithTypeAndName()); +} + +FutureSetFromSubquery::FutureSetFromSubquery( + String key, + QueryTreeNodePtr query_tree_, + const Settings & settings) + : query_tree(std::move(query_tree_)) +{ + set_and_key = std::make_shared(); + set_and_key->key = std::move(key); + + auto size_limits = getSizeLimitsForSet(settings); + set_and_key->set = std::make_shared(size_limits, settings.use_index_for_in_with_subqueries_max_values, settings.transform_null_in); +} + +SetPtr FutureSetFromSubquery::get() const +{ + if (set_and_key->set != nullptr && set_and_key->set->isCreated()) + return set_and_key->set; + + return nullptr; +} + +void FutureSetFromSubquery::setQueryPlan(std::unique_ptr source_) +{ + source = std::move(source_); + set_and_key->set->setHeader(source->getCurrentDataStream().header.getColumnsWithTypeAndName()); +} + +const DataTypes & FutureSetFromSubquery::getTypes() const +{ + return set_and_key->set->getElementsTypes(); +} + +std::unique_ptr FutureSetFromSubquery::build(const ContextPtr & context) +{ + if (set_and_key->set->isCreated()) + return nullptr; + + const auto & settings = context->getSettingsRef(); + + auto plan = std::move(source); + + if (!plan) + return nullptr; + + auto creating_set = std::make_unique( + plan->getCurrentDataStream(), + set_and_key, + external_table, + SizeLimits(settings.max_rows_to_transfer, settings.max_bytes_to_transfer, settings.transfer_overflow_mode), + context); + creating_set->setStepDescription("Create set for subquery"); + plan->addStep(std::move(creating_set)); + return plan; +} + +SetPtr FutureSetFromSubquery::buildOrderedSetInplace(const ContextPtr & context) +{ + if (!context->getSettingsRef().use_index_for_in_with_subqueries) + return 
nullptr; + + if (auto set = get()) + { + if (set->hasExplicitSetElements()) + return set; + + return nullptr; + } + + if (external_table_set) + { + auto set = external_table_set->buildOrderedSetInplace(context); + if (set) + return set_and_key->set = set; + } + + auto plan = build(context); + if (!plan) + return nullptr; + + set_and_key->set->fillSetElements(); + auto builder = plan->buildQueryPipeline(QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); + auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); + pipeline.complete(std::make_shared(Block())); + + CompletedPipelineExecutor executor(pipeline); + executor.execute(); + + set_and_key->set->checkIsCreated(); + + return set_and_key->set; +} + + +String PreparedSets::toString(const PreparedSets::Hash & key, const DataTypes & types) { WriteBufferFromOwnString buf; - buf << "__set_" << ast_hash.first << "_" << ast_hash.second; + buf << "__set_" << key.first << "_" << key.second; if (!types.empty()) { buf << "("; @@ -66,114 +224,112 @@ String PreparedSetKey::toString() const return buf.str(); } -SubqueryForSet & PreparedSets::createOrGetSubquery(const String & subquery_id, const PreparedSetKey & key, - SizeLimits set_size_limit, bool transform_null_in) +FutureSetPtr PreparedSets::addFromTuple(const Hash & key, Block block, const Settings & settings) { - SubqueryForSet & subquery = subqueries[subquery_id]; + auto from_tuple = std::make_shared(std::move(block), settings); + const auto & set_types = from_tuple->getTypes(); + auto & sets_by_hash = sets_from_tuple[key]; - /// If you already created a Set with the same subquery / table for another ast - /// In that case several PreparedSetKey would share same subquery and set - /// Not sure if it's really possible case (maybe for distributed query when set was filled by external table?) - if (subquery.set.isValid()) - sets[key] = subquery.set; - else - { - subquery.set_in_progress = std::make_shared(set_size_limit, false, transform_null_in); - sets[key] = FutureSet(subquery.promise_to_fill_set.get_future()); - } + for (const auto & set : sets_by_hash) + if (equals(set->getTypes(), set_types)) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Duplicate set: {}", toString(key, set_types)); - if (!subquery.set_in_progress) - { - subquery.key = key.toString(); - subquery.set_in_progress = std::make_shared(set_size_limit, false, transform_null_in); - } - - return subquery; + sets_by_hash.push_back(from_tuple); + return from_tuple; } -/// If the subquery is not associated with any set, create default-constructed SubqueryForSet. -/// It's aimed to fill external table passed to SubqueryForSet::createSource. 
-SubqueryForSet & PreparedSets::getSubquery(const String & subquery_id) { return subqueries[subquery_id]; } - -void PreparedSets::set(const PreparedSetKey & key, SetPtr set_) { sets[key] = FutureSet(set_); } - -FutureSet PreparedSets::getFuture(const PreparedSetKey & key) const +FutureSetPtr PreparedSets::addFromStorage(const Hash & key, SetPtr set_) { - auto it = sets.find(key); - if (it == sets.end()) - return {}; + auto from_storage = std::make_shared(std::move(set_)); + auto [it, inserted] = sets_from_storage.emplace(key, from_storage); + + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Duplicate set: {}", toString(key, {})); + + return from_storage; +} + +FutureSetPtr PreparedSets::addFromSubquery( + const Hash & key, + std::unique_ptr source, + StoragePtr external_table, + FutureSetPtr external_table_set, + const Settings & settings) +{ + auto from_subquery = std::make_shared( + toString(key, {}), + std::move(source), + std::move(external_table), + std::move(external_table_set), + settings); + + auto [it, inserted] = sets_from_subqueries.emplace(key, from_subquery); + + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Duplicate set: {}", toString(key, {})); + + return from_subquery; +} + +FutureSetPtr PreparedSets::addFromSubquery( + const Hash & key, + QueryTreeNodePtr query_tree, + const Settings & settings) +{ + auto from_subquery = std::make_shared( + toString(key, {}), + std::move(query_tree), + settings); + + auto [it, inserted] = sets_from_subqueries.emplace(key, from_subquery); + + if (!inserted) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Duplicate set: {}", toString(key, {})); + + return from_subquery; +} + +FutureSetPtr PreparedSets::findTuple(const Hash & key, const DataTypes & types) const +{ + auto it = sets_from_tuple.find(key); + if (it == sets_from_tuple.end()) + return nullptr; + + for (const auto & set : it->second) + if (equals(set->getTypes(), types)) + return set; + + return nullptr; +} + +std::shared_ptr PreparedSets::findSubquery(const Hash & key) const +{ + auto it = sets_from_subqueries.find(key); + if (it == sets_from_subqueries.end()) + return nullptr; + return it->second; } -SetPtr PreparedSets::get(const PreparedSetKey & key) const +std::shared_ptr PreparedSets::findStorage(const Hash & key) const { - auto it = sets.find(key); - if (it == sets.end() || !it->second.isReady()) + auto it = sets_from_storage.find(key); + if (it == sets_from_storage.end()) return nullptr; - return it->second.get(); + + return it->second; } -std::vector PreparedSets::getByTreeHash(IAST::Hash ast_hash) const +PreparedSets::Subqueries PreparedSets::getSubqueries() { - std::vector res; - for (const auto & it : this->sets) - { - if (it.first.ast_hash == ast_hash) - res.push_back(it.second); - } + PreparedSets::Subqueries res; + res.reserve(sets_from_subqueries.size()); + for (auto & [_, set] : sets_from_subqueries) + res.push_back(set); + return res; } -PreparedSets::SubqueriesForSets PreparedSets::detachSubqueries() -{ - auto res = std::move(subqueries); - subqueries = SubqueriesForSets(); - return res; -} - -bool PreparedSets::empty() const { return sets.empty(); } - -void SubqueryForSet::createSource(InterpreterSelectWithUnionQuery & interpreter, StoragePtr table_) -{ - source = std::make_unique(); - interpreter.buildQueryPlan(*source); - if (table_) - table = table_; -} - -bool SubqueryForSet::hasSource() const -{ - return source != nullptr; -} - -QueryPlanPtr SubqueryForSet::detachSource() -{ - auto res = std::move(source); - source = nullptr; - 
return res;
-}
-
-
-FutureSet::FutureSet(SetPtr set)
-{
-    std::promise promise;
-    promise.set_value(set);
-    *this = FutureSet(promise.get_future());
-}
-
-
-bool FutureSet::isReady() const
-{
-    return future_set.valid() &&
-        future_set.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
-}
-
-bool FutureSet::isCreated() const
-{
-    return isReady() && get() != nullptr && get()->isCreated();
-}
-

 std::variant, SharedSet> PreparedSetsCache::findOrPromiseToBuild(const String & key)
 {
     std::lock_guard lock(cache_mutex);
diff --git a/src/Interpreters/PreparedSets.h b/src/Interpreters/PreparedSets.h
index 4a7d1c3de46..cb240f5260a 100644
--- a/src/Interpreters/PreparedSets.h
+++ b/src/Interpreters/PreparedSets.h
@@ -2,14 +2,13 @@
 #include
 #include
-#include
 #include
 #include
 #include
-#include
+#include
 #include
-#include
-#include
+#include
+#include

 namespace DB
 {
@@ -18,121 +17,158 @@ class QueryPlan;

 class Set;
 using SetPtr = std::shared_ptr;
-class InterpreterSelectWithUnionQuery;
+struct SetKeyColumns;
+
+class IQueryTreeNode;
+using QueryTreeNodePtr = std::shared_ptr;
+
+struct Settings;
+
+/// This is a structure for the prepared sets cache.
+/// SetPtr can be taken from the cache, so we should pass a holder for it.
+struct SetAndKey
+{
+    String key;
+    SetPtr set;
+};
+
+using SetAndKeyPtr = std::shared_ptr;

 /// Represents a set in a query that might be referenced at analysis time and built later during execution.
 /// Also it can represent a constant set that is ready to use.
 /// At analysis stage the FutureSets are created but not necessarily filled. Then for non-constant sets there
 /// must be an explicit step to build them before they can be used.
-/// FutureSet objects can be stored in PreparedSets and are not intended to be used from multiple threads.
-class FutureSet final
+/// A set may be useful for indexes; in this case a special ordered set with stored elements is built in place.
+class FutureSet
 {
 public:
-    FutureSet() = default;
+    virtual ~FutureSet() = default;

-    /// Create FutureSet from an object that will be created in the future.
-    explicit FutureSet(const std::shared_future & future_set_) : future_set(future_set_) {}
+    /// Returns set if set is ready (created and filled) or nullptr if not.
+    virtual SetPtr get() const = 0;
+    /// Returns set->getElementsTypes(), even if set is not created yet.
+    virtual const DataTypes & getTypes() const = 0;
+    /// If possible, return set with stored elements useful for PK analysis.
+    virtual SetPtr buildOrderedSetInplace(const ContextPtr & context) = 0;
+};

-    /// Create FutureSet from a ready set.
-    explicit FutureSet(SetPtr readySet);
+using FutureSetPtr = std::shared_ptr;

-    /// The set object will be ready in the future, as opposed to 'null' object when FutureSet is default constructed.
-    bool isValid() const { return future_set.valid(); }
+/// Future set from an already filled set.
+/// Usually it is from StorageSet.
+class FutureSetFromStorage final : public FutureSet
+{
+public:
+    FutureSetFromStorage(SetPtr set_);

-    /// The the value of SetPtr is ready, but the set object might not have been filled yet.
-    bool isReady() const;
-
-    /// The set object is ready and filled.
-    bool isCreated() const;
-
-    SetPtr get() const { chassert(isReady()); return future_set.get(); }
+    SetPtr get() const override;
+    const DataTypes & getTypes() const override;
+    SetPtr buildOrderedSetInplace(const ContextPtr &) override;

 private:
-    std::shared_future future_set;
+    SetPtr set;
 };

-/// Information on how to build set for the [GLOBAL] IN section.
-class SubqueryForSet +/// Set from tuple is filled as well as set from storage. +/// Additionally, it can be converted to set useful for PK. +class FutureSetFromTuple final : public FutureSet { public: + FutureSetFromTuple(Block block, const Settings & settings); - void createSource(InterpreterSelectWithUnionQuery & interpreter, StoragePtr table_ = nullptr); + SetPtr get() const override { return set; } + SetPtr buildOrderedSetInplace(const ContextPtr & context) override; - bool hasSource() const; + const DataTypes & getTypes() const override; - /// Returns query plan for the set's source - /// and removes it from SubqueryForSet because we need to build it only once. - std::unique_ptr detachSource(); - - /// Build this set from the result of the subquery. - String key; - SetPtr set_in_progress; - /// After set_in_progress is finished it will be put into promise_to_fill_set and thus all FutureSet's - /// that are referencing this set will be filled. - std::promise promise_to_fill_set; - FutureSet set = FutureSet{promise_to_fill_set.get_future()}; - - /// If set, put the result into the table. - /// This is a temporary table for transferring to remote servers for distributed query processing. - StoragePtr table; - - /// The source is obtained using the InterpreterSelectQuery subquery. - std::unique_ptr source; +private: + SetPtr set; + SetKeyColumns set_key_columns; }; -struct PreparedSetKey +/// Set from subquery can be built inplace for PK or in CreatingSet step. +/// If use_index_for_in_with_subqueries_max_values is reached, set for PK won't be created, +/// but ordinary set would be created instead. +class FutureSetFromSubquery final : public FutureSet { - /// Prepared sets for tuple literals are indexed by the hash of the tree contents and by the desired - /// data types of set elements (two different Sets can be required for two tuples with the same contents - /// if left hand sides of the IN operators have different types). - static PreparedSetKey forLiteral(const IAST & ast, DataTypes types_); +public: + FutureSetFromSubquery( + String key, + std::unique_ptr source_, + StoragePtr external_table_, + FutureSetPtr external_table_set_, + const Settings & settings); - /// Prepared sets for subqueries are indexed only by the AST contents because the type of the resulting - /// set is fully determined by the subquery. - static PreparedSetKey forSubquery(const IAST & ast); + FutureSetFromSubquery( + String key, + QueryTreeNodePtr query_tree_, + const Settings & settings); - IAST::Hash ast_hash; - DataTypes types; /// Empty for subqueries. + SetPtr get() const override; + const DataTypes & getTypes() const override; + SetPtr buildOrderedSetInplace(const ContextPtr & context) override; - bool operator==(const PreparedSetKey & other) const; + std::unique_ptr build(const ContextPtr & context); - String toString() const; + QueryTreeNodePtr detachQueryTree() { return std::move(query_tree); } + void setQueryPlan(std::unique_ptr source_); - struct Hash - { - UInt64 operator()(const PreparedSetKey & key) const { return key.ast_hash.first; } - }; +private: + SetAndKeyPtr set_and_key; + StoragePtr external_table; + FutureSetPtr external_table_set; + + std::unique_ptr source; + QueryTreeNodePtr query_tree; }; +/// Container for all the sets used in query. 
class PreparedSets { public: - using SubqueriesForSets = std::unordered_map; - SubqueryForSet & createOrGetSubquery(const String & subquery_id, const PreparedSetKey & key, - SizeLimits set_size_limit, bool transform_null_in); - SubqueryForSet & getSubquery(const String & subquery_id); + using Hash = std::pair; + struct Hashing + { + UInt64 operator()(const Hash & key) const { return key.first ^ key.second; } + }; - void set(const PreparedSetKey & key, SetPtr set_); - FutureSet getFuture(const PreparedSetKey & key) const; - SetPtr get(const PreparedSetKey & key) const; + using SetsFromTuple = std::unordered_map>, Hashing>; + using SetsFromStorage = std::unordered_map, Hashing>; + using SetsFromSubqueries = std::unordered_map, Hashing>; - /// Get subqueries and clear them. - /// We need to build a plan for subqueries just once. That's why we can clear them after accessing them. - /// SetPtr would still be available for consumers of PreparedSets. - SubqueriesForSets detachSubqueries(); + FutureSetPtr addFromStorage(const Hash & key, SetPtr set_); + FutureSetPtr addFromTuple(const Hash & key, Block block, const Settings & settings); - /// Returns all sets that match the given ast hash not checking types - /// Used in KeyCondition and MergeTreeIndexConditionBloomFilter to make non exact match for types in PreparedSetKey - std::vector getByTreeHash(IAST::Hash ast_hash) const; + FutureSetPtr addFromSubquery( + const Hash & key, + std::unique_ptr source, + StoragePtr external_table, + FutureSetPtr external_table_set, + const Settings & settings); - bool empty() const; + FutureSetPtr addFromSubquery( + const Hash & key, + QueryTreeNodePtr query_tree, + const Settings & settings); + + FutureSetPtr findTuple(const Hash & key, const DataTypes & types) const; + std::shared_ptr findStorage(const Hash & key) const; + std::shared_ptr findSubquery(const Hash & key) const; + + using Subqueries = std::vector>; + Subqueries getSubqueries(); + + const SetsFromTuple & getSetsFromTuple() const { return sets_from_tuple; } + // const SetsFromStorage & getSetsFromStorage() const { return sets_from_storage; } + // const SetsFromSubqueries & getSetsFromSubquery() const { return sets_from_subqueries; } + + static String toString(const Hash & key, const DataTypes & types); private: - std::unordered_map sets; - - /// This is the information required for building sets - SubqueriesForSets subqueries; + SetsFromTuple sets_from_tuple; + SetsFromStorage sets_from_storage; + SetsFromSubqueries sets_from_subqueries; }; using PreparedSetsPtr = std::shared_ptr; diff --git a/src/Interpreters/ProcessList.cpp b/src/Interpreters/ProcessList.cpp index aca474bf152..daa8d434ab6 100644 --- a/src/Interpreters/ProcessList.cpp +++ b/src/Interpreters/ProcessList.cpp @@ -246,6 +246,7 @@ ProcessList::insert(const String & query_, const IAST * ast, ContextMutablePtr q priorities.insert(static_cast(settings.priority)), std::move(thread_group), query_kind, + settings, watch_start_nanoseconds)); increaseQueryKindAmount(query_kind); @@ -342,6 +343,7 @@ QueryStatus::QueryStatus( QueryPriorities::Handle && priority_handle_, ThreadGroupPtr && thread_group_, IAST::QueryKind query_kind_, + const Settings & query_settings_, UInt64 watch_start_nanoseconds) : WithContext(context_) , query(query_) @@ -353,9 +355,11 @@ QueryStatus::QueryStatus( , query_kind(query_kind_) , num_queries_increment(CurrentMetrics::Query) { - auto settings = getContext()->getSettings(); - limits.max_execution_time = settings.max_execution_time; - overflow_mode = 
settings.timeout_overflow_mode;
+    /// We have to pass `query_settings_` to this constructor because we can't use `context_->getSettings().max_execution_time` here:
+    /// a QueryStatus is created with `ProcessList::mutex` locked (see ProcessList::insert) and calling `context_->getSettings()`
+    /// would take the context's lock too, and holding those two locks simultaneously is not a good idea.
+    limits.max_execution_time = query_settings_.max_execution_time;
+    overflow_mode = query_settings_.timeout_overflow_mode;
 }

 QueryStatus::~QueryStatus()
@@ -589,10 +593,13 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even
             res.profile_counters = std::make_shared<ProfileEvents::Counters::Snapshot>(thread_group->performance_counters.getPartiallyAtomicSnapshot());
     }

-    if (get_settings && getContext())
+    if (get_settings)
     {
-        res.query_settings = std::make_shared<Settings>(getContext()->getSettings());
-        res.current_database = getContext()->getCurrentDatabase();
+        if (auto ctx = context.lock())
+        {
+            res.query_settings = std::make_shared<Settings>(ctx->getSettings());
+            res.current_database = ctx->getCurrentDatabase();
+        }
     }

     return res;
@@ -601,12 +608,18 @@ QueryStatusInfo QueryStatus::getInfo(bool get_thread_list, bool get_profile_even

 ProcessList::Info ProcessList::getInfo(bool get_thread_list, bool get_profile_events, bool get_settings) const
 {
+    /// We have to copy `processes` first because `process->getInfo()` below can access the context to get the query settings,
+    /// and it's better not to keep the process list's lock while doing that.
+    std::vector<QueryStatusPtr> processes_copy;
+
+    {
+        auto lock = safeLock();
+        processes_copy.assign(processes.begin(), processes.end());
+    }
+
     Info per_query_infos;
-
-    auto lock = safeLock();
-
-    per_query_infos.reserve(processes.size());
-    for (const auto & process : processes)
+    per_query_infos.reserve(processes_copy.size());
+    for (const auto & process : processes_copy)
         per_query_infos.emplace_back(process->getInfo(get_thread_list, get_profile_events, get_settings));

     return per_query_infos;
diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h
index b593bcef395..e5a61497ff2 100644
--- a/src/Interpreters/ProcessList.h
+++ b/src/Interpreters/ProcessList.h
@@ -164,6 +164,7 @@ public:
         QueryPriorities::Handle && priority_handle_,
         ThreadGroupPtr && thread_group_,
         IAST::QueryKind query_kind_,
+        const Settings & query_settings_,
         UInt64 watch_start_nanoseconds);

     ~QueryStatus();
diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp
index a7bea63bd99..b8b61c7c11f 100644
--- a/src/Interpreters/Set.cpp
+++ b/src/Interpreters/Set.cpp
@@ -103,6 +103,21 @@ void NO_INLINE Set::insertFromBlockImplCase(
 }

+DataTypes Set::getElementTypes(DataTypes types, bool transform_null_in)
+{
+    for (auto & type : types)
+    {
+        if (const auto * low_cardinality_type = typeid_cast<const DataTypeLowCardinality *>(type.get()))
+            type = low_cardinality_type->getDictionaryType();
+
+        if (!transform_null_in)
+            type = removeNullable(type);
+    }
+
+    return types;
+}
+
+
 void Set::setHeader(const ColumnsWithTypeAndName & header)
 {
     std::lock_guard lock(rwlock);
@@ -152,46 +167,67 @@ void Set::setHeader(const ColumnsWithTypeAndName & header)
         extractNestedColumnsAndNullMap(key_columns, null_map);
     }

-    if (fill_set_elements)
-    {
-        /// Create empty columns with set values in advance.
-        /// It is needed because set may be empty, so method 'insertFromBlock' will be never called.
-        set_elements.reserve(keys_size);
-        for (const auto & type : set_elements_types)
-            set_elements.emplace_back(type->createColumn());
-    }
-
     /// Choose data structure to use for the set.
     data.init(data.chooseMethod(key_columns, key_sizes));
 }

+void Set::fillSetElements()
+{
+    fill_set_elements = true;
+    set_elements.reserve(keys_size);
+    for (const auto & type : set_elements_types)
+        set_elements.emplace_back(type->createColumn());
+}
+
+
 bool Set::insertFromBlock(const ColumnsWithTypeAndName & columns)
 {
     Columns cols;
     cols.reserve(columns.size());
     for (const auto & column : columns)
         cols.emplace_back(column.column);
-    return insertFromBlock(cols);
+    return insertFromColumns(cols);
 }

-bool Set::insertFromBlock(const Columns & columns)
+bool Set::insertFromColumns(const Columns & columns)
+{
+    size_t rows = columns.at(0)->size();
+
+    SetKeyColumns holder;
+    /// Filter to extract distinct values from the block.
+    if (fill_set_elements)
+        holder.filter = ColumnUInt8::create(rows);
+
+    bool inserted = insertFromColumns(columns, holder);
+    if (inserted && fill_set_elements)
+    {
+        if (max_elements_to_fill && max_elements_to_fill < data.getTotalRowCount())
+        {
+            /// Drop the filled elements
+            fill_set_elements = false;
+            set_elements.clear();
+        }
+        else
+            appendSetElements(holder);
+    }
+
+    return inserted;
+}
+
+bool Set::insertFromColumns(const Columns & columns, SetKeyColumns & holder)
 {
     std::lock_guard lock(rwlock);

     if (data.empty())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Method Set::setHeader must be called before Set::insertFromBlock");

-    ColumnRawPtrs key_columns;
-    key_columns.reserve(keys_size);
-
-    /// The constant columns to the right of IN are not supported directly. For this, they first materialize.
-    Columns materialized_columns;
+    holder.key_columns.reserve(keys_size);
+    holder.materialized_columns.reserve(keys_size);

     /// Remember the columns we will work with
     for (size_t i = 0; i < keys_size; ++i)
     {
-        materialized_columns.emplace_back(columns.at(i)->convertToFullIfNeeded());
-        key_columns.emplace_back(materialized_columns.back().get());
+        holder.materialized_columns.emplace_back(columns.at(i)->convertToFullIfNeeded());
+        holder.key_columns.emplace_back(holder.materialized_columns.back().get());
     }

     size_t rows = columns.at(0)->size();
@@ -200,12 +236,7 @@ bool Set::insertFromBlock(const Columns & columns)
     ConstNullMapPtr null_map{};
     ColumnPtr null_map_holder;
     if (!transform_null_in)
-        null_map_holder = extractNestedColumnsAndNullMap(key_columns, null_map);
-
-    /// Filter to extract distinct values from the block.
-    ColumnUInt8::MutablePtr filter;
-    if (fill_set_elements)
-        filter = ColumnUInt8::create(rows);
+        null_map_holder = extractNestedColumnsAndNullMap(holder.key_columns, null_map);

     switch (data.type)
     {
@@ -213,29 +244,34 @@ bool Set::insertFromBlock(const Columns & columns)
             break;
#define M(NAME) \
         case SetVariants::Type::NAME: \
-            insertFromBlockImpl(*data.NAME, key_columns, rows, data, null_map, filter ? &filter->getData() : nullptr); \
+            insertFromBlockImpl(*data.NAME, holder.key_columns, rows, data, null_map, holder.filter ?
&holder.filter->getData() : nullptr); \ break; APPLY_FOR_SET_VARIANTS(M) #undef M } - if (fill_set_elements) - { - for (size_t i = 0; i < keys_size; ++i) - { - auto filtered_column = key_columns[i]->filter(filter->getData(), rows); - if (set_elements[i]->empty()) - set_elements[i] = filtered_column; - else - set_elements[i]->insertRangeFrom(*filtered_column, 0, filtered_column->size()); - if (transform_null_in && null_map_holder) - set_elements[i]->insert(Null{}); - } - } - return limits.check(data.getTotalRowCount(), data.getTotalByteCount(), "IN-set", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED); } +void Set::appendSetElements(SetKeyColumns & holder) +{ + if (holder.key_columns.size() != keys_size || set_elements.size() != keys_size) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid number of key columns for set. Expected {} got {} and {}", + keys_size, holder.key_columns.size(), set_elements.size()); + + size_t rows = holder.key_columns.at(0)->size(); + for (size_t i = 0; i < keys_size; ++i) + { + auto filtered_column = holder.key_columns[i]->filter(holder.filter->getData(), rows); + if (set_elements[i]->empty()) + set_elements[i] = filtered_column; + else + set_elements[i]->insertRangeFrom(*filtered_column, 0, filtered_column->size()); + if (transform_null_in && holder.null_map_holder) + set_elements[i]->insert(Null{}); + } +} + void Set::checkIsCreated() const { if (!is_created.load()) @@ -429,6 +465,11 @@ void Set::checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) c MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector && indexes_mapping_) : has_all_keys(set_elements.size() == indexes_mapping_.size()), indexes_mapping(std::move(indexes_mapping_)) { + // std::cerr << "MergeTreeSetIndex::MergeTreeSetIndex " + // << set_elements.size() << ' ' << indexes_mapping.size() << std::endl; + // for (const auto & vv : indexes_mapping) + // std::cerr << vv.key_index << ' ' << vv.tuple_index << std::endl; + ::sort(indexes_mapping.begin(), indexes_mapping.end(), [](const KeyTuplePositionMapping & l, const KeyTuplePositionMapping & r) { @@ -471,6 +512,7 @@ MergeTreeSetIndex::MergeTreeSetIndex(const Columns & set_elements, std::vector & key_ranges, const DataTypes & data_types, bool single_point) const { size_t tuple_size = indexes_mapping.size(); + // std::cerr << "MergeTreeSetIndex::checkInRange " << single_point << ' ' << tuple_size << ' ' << has_all_keys << std::endl; FieldValues left_point; FieldValues right_point; diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index fff5fa4e1b1..9ea46e117ef 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -30,9 +31,9 @@ public: /// (that is useful only for checking that some value is in the set and may not store the original values), /// store all set elements in explicit form. /// This is needed for subsequent use for index. - Set(const SizeLimits & limits_, bool fill_set_elements_, bool transform_null_in_) + Set(const SizeLimits & limits_, size_t max_elements_to_fill_, bool transform_null_in_) : log(&Poco::Logger::get("Set")), - limits(limits_), fill_set_elements(fill_set_elements_), transform_null_in(transform_null_in_) + limits(limits_), max_elements_to_fill(max_elements_to_fill_), transform_null_in(transform_null_in_) { } @@ -45,9 +46,13 @@ public: void setHeader(const ColumnsWithTypeAndName & header); /// Returns false, if some limit was exceeded and no need to insert more data. 
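As a side note on the filter mechanism used by appendSetElements() above: while keys are inserted into the hash table, rows seen for the first time are marked in a 0/1 filter, and only those rows are kept as the explicit set elements. A simplified single-column sketch of that idea in plain STL (not the real column classes):

#include <cstdint>
#include <unordered_set>
#include <vector>

int main()
{
    std::vector<int> column{3, 1, 3, 2, 1};
    std::unordered_set<int> data;                   // the "set" itself
    std::vector<uint8_t> filter(column.size(), 0);

    // insert() tells us whether a value is new; that becomes the filter bit
    for (size_t row = 0; row < column.size(); ++row)
        filter[row] = data.insert(column[row]).second ? 1 : 0;

    std::vector<int> set_elements;                  // what gets appended as explicit elements
    for (size_t row = 0; row < column.size(); ++row)
        if (filter[row])
            set_elements.push_back(column[row]);

    return set_elements.size() == 3 ? 0 : 1;        // {3, 1, 2}: each distinct value once
}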
-    bool insertFromBlock(const Columns & columns);
+    bool insertFromColumns(const Columns & columns);
     bool insertFromBlock(const ColumnsWithTypeAndName & columns);

+    void fillSetElements();
+    bool insertFromColumns(const Columns & columns, SetKeyColumns & holder);
+    void appendSetElements(SetKeyColumns & holder);
+
     /// Call after all blocks were inserted. To get the information that set is already created.
     void finishInsert() { is_created = true; }

@@ -68,13 +73,15 @@ public:
     const DataTypes & getDataTypes() const { return data_types; }
     const DataTypes & getElementsTypes() const { return set_elements_types; }

-    bool hasExplicitSetElements() const { return fill_set_elements; }
+    bool hasExplicitSetElements() const { return fill_set_elements || (!set_elements.empty() && set_elements.front()->size() == data.getTotalRowCount()); }
     Columns getSetElements() const { checkIsCreated(); return { set_elements.begin(), set_elements.end() }; }

     void checkColumnsNumber(size_t num_key_columns) const;
     bool areTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) const;
     void checkTypesEqual(size_t set_type_idx, const DataTypePtr & other_type) const;

+    static DataTypes getElementTypes(DataTypes types, bool transform_null_in);
+
 private:
     size_t keys_size = 0;
     Sizes key_sizes;
@@ -110,7 +117,8 @@ private:
     SizeLimits limits;

     /// Do we need to additionally store all elements of the set in explicit form for subsequent use for index.
-    bool fill_set_elements;
+    bool fill_set_elements = false;
+    size_t max_elements_to_fill;

     /// If true, insert NULL values to set.
     bool transform_null_in;
diff --git a/src/Interpreters/SetKeys.h b/src/Interpreters/SetKeys.h
new file mode 100644
index 00000000000..2cab9849c9b
--- /dev/null
+++ b/src/Interpreters/SetKeys.h
@@ -0,0 +1,18 @@
+#pragma once
+#include
+
+namespace DB
+{
+
+/// Prepared key columns of a set, which can be appended to fill the set's elements.
+/// Used only to upgrade a set built from a tuple.
+struct SetKeyColumns
+{
+    /// The constant columns to the right of IN are not supported directly. For this, they are materialized first.
+    ColumnRawPtrs key_columns;
+    Columns materialized_columns;
+    ColumnPtr null_map_holder;
+    ColumnUInt8::MutablePtr filter;
+};
+
+}
diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp
index fd4d2c9d846..c38b3c79026 100644
--- a/src/Interpreters/TreeOptimizer.cpp
+++ b/src/Interpreters/TreeOptimizer.cpp
@@ -25,7 +25,6 @@
 #include
 #include
 #include
-#include
 #include
 #include

@@ -678,21 +677,6 @@ void optimizeInjectiveFunctionsInsideUniq(ASTPtr & query, ContextPtr context)
     RemoveInjectiveFunctionsVisitor(data).visit(query);
 }

-void optimizeDateFilters(ASTSelectQuery * select_query, const std::vector<TableWithColumnNamesAndTypes> & tables_with_columns, ContextPtr context)
-{
-    /// Predicates in HAVING clause has been moved to WHERE clause.
- if (select_query->where()) - { - OptimizeDateOrDateTimeConverterWithPreimageVisitor::Data data{tables_with_columns, context}; - OptimizeDateOrDateTimeConverterWithPreimageVisitor(data).visit(select_query->refWhere()); - } - if (select_query->prewhere()) - { - OptimizeDateOrDateTimeConverterWithPreimageVisitor::Data data{tables_with_columns, context}; - OptimizeDateOrDateTimeConverterWithPreimageVisitor(data).visit(select_query->refPrewhere()); - } -} - void transformIfStringsIntoEnum(ASTPtr & query) { std::unordered_set function_names = {"if", "transform"}; @@ -796,9 +780,6 @@ void TreeOptimizer::apply(ASTPtr & query, TreeRewriterResult & result, tables_with_columns, result.storage_snapshot->metadata, result.storage); } - /// Rewrite date filters to avoid the calls of converters such as toYear, toYYYYMM, etc. - optimizeDateFilters(select_query, tables_with_columns, context); - /// GROUP BY injective function elimination. optimizeGroupBy(select_query, context); diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index de2e2b9ad92..c52dab722c9 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -14,7 +15,6 @@ #include #include #include -#include #include #include @@ -645,139 +645,133 @@ static std::tuple executeQueryImpl( } } - bool can_use_query_cache = settings.use_query_cache && !internal && !ast->as(); + QueryCachePtr query_cache = context->getQueryCache(); + const bool can_use_query_cache = query_cache != nullptr && settings.use_query_cache && !internal && (ast->as() || ast->as()); + bool write_into_query_cache = false; if (!async_insert) { - /// We need to start the (implicit) transaction before getting the interpreter as this will get links to the latest snapshots - if (!context->getCurrentTransaction() && settings.implicit_transaction && !ast->as()) + /// If it is a non-internal SELECT, and passive/read use of the query cache is enabled, and the cache knows the query, then set + /// a pipeline with a source populated by the query cache. 
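The shape of that cache-first check, reduced to a standalone sketch with a plain map standing in for the real QueryCache (all names here are illustrative, not the actual API):

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

using Result = std::vector<std::string>;
std::unordered_map<std::string, Result> query_cache;    // stands in for QueryCache

/// Mirrors the role of get_result_from_query_cache(): returning true means the
/// "pipeline" was replaced by cached data and the query must not be interpreted at all.
bool tryGetResultFromCache(const std::string & query, Result & out)
{
    auto it = query_cache.find(query);
    if (it == query_cache.end())
        return false;
    out = it->second;
    return true;
}

int main()
{
    query_cache["SELECT 1"] = {"1"};
    Result res;
    if (!tryGetResultFromCache("SELECT 1", res))
        res = {"computed"};                             // interpreter path, taken only on a miss
    assert(res == Result{"1"});
    return 0;
}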
+ auto get_result_from_query_cache = [&]() { - try + if (can_use_query_cache && settings.enable_reads_from_query_cache) { - if (context->isGlobalContext()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot create transactions"); - - execute_implicit_tcl_query(context, ASTTransactionControl::BEGIN); - } - catch (Exception & e) - { - e.addMessage("while starting a transaction with 'implicit_transaction'"); - throw; - } - } - - interpreter = InterpreterFactory::get(ast, context, SelectQueryOptions(stage).setInternal(internal)); - - const auto & query_settings = context->getSettingsRef(); - if (context->getCurrentTransaction() && query_settings.throw_on_unsupported_query_inside_transaction) - { - if (!interpreter->supportsTransactions()) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Transactions are not supported for this type of query ({})", ast->getID()); - - } - - if (!interpreter->ignoreQuota() && !quota_checked) - { - quota = context->getQuota(); - if (quota) - { - if (ast->as() || ast->as()) - { - quota->used(QuotaType::QUERY_SELECTS, 1); - } - else if (ast->as()) - { - quota->used(QuotaType::QUERY_INSERTS, 1); - } - quota->used(QuotaType::QUERIES, 1); - quota->checkExceeded(QuotaType::ERRORS); - } - } - - if (!interpreter->ignoreLimits()) - { - limits.mode = LimitsMode::LIMITS_CURRENT; - limits.size_limits = SizeLimits(settings.max_result_rows, settings.max_result_bytes, settings.result_overflow_mode); - } - - if (auto * insert_interpreter = typeid_cast(&*interpreter)) - { - /// Save insertion table (not table function). TODO: support remote() table function. - auto table_id = insert_interpreter->getDatabaseTable(); - if (!table_id.empty()) - context->setInsertionTable(std::move(table_id)); - - if (insert_data_buffer_holder) - insert_interpreter->addBuffer(std::move(insert_data_buffer_holder)); - } - - { - std::unique_ptr span; - if (OpenTelemetry::CurrentContext().isTraceEnabled()) - { - auto * raw_interpreter_ptr = interpreter.get(); - String class_name(demangle(typeid(*raw_interpreter_ptr).name())); - span = std::make_unique(class_name + "::execute()"); - } - - res = interpreter->execute(); - - /// If - /// - it is a SELECT query, - /// - passive (read) use of the query cache is enabled, and - /// - the query cache knows the query result - /// then replace the pipeline by a new pipeline with a single source that is populated from the query cache - auto query_cache = context->getQueryCache(); - bool read_result_from_query_cache = false; /// a query must not read from *and* write to the query cache at the same time - if (query_cache != nullptr - && (can_use_query_cache && settings.enable_reads_from_query_cache) - && res.pipeline.pulling()) - { - QueryCache::Key key( - ast, res.pipeline.getHeader(), - context->getUserName(), /*dummy for is_shared*/ false, - /*dummy value for expires_at*/ std::chrono::system_clock::from_time_t(1), - /*dummy value for is_compressed*/ false); + QueryCache::Key key(ast, context->getUserName()); QueryCache::Reader reader = query_cache->createReader(key); if (reader.hasCacheEntryForKey()) { QueryPipeline pipeline; pipeline.readFromQueryCache(reader.getSource(), reader.getSourceTotals(), reader.getSourceExtremes()); res.pipeline = std::move(pipeline); - read_result_from_query_cache = true; + return true; } } + return false; + }; - /// If - /// - it is a SELECT query, and - /// - active (write) use of the query cache is enabled - /// then add a processor on top of the pipeline which stores the result in the query cache. 
- if (!read_result_from_query_cache - && query_cache != nullptr - && can_use_query_cache && settings.enable_writes_to_query_cache - && res.pipeline.pulling() - && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_cache_store_results_of_queries_with_nondeterministic_functions)) + if (!get_result_from_query_cache()) + { + /// We need to start the (implicit) transaction before getting the interpreter as this will get links to the latest snapshots + if (!context->getCurrentTransaction() && settings.implicit_transaction && !ast->as()) { - QueryCache::Key key( - ast, res.pipeline.getHeader(), - context->getUserName(), settings.query_cache_share_between_users, - std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl), - settings.query_cache_compress_entries); - - const size_t num_query_runs = query_cache->recordQueryRun(key); - if (num_query_runs > settings.query_cache_min_query_runs) + try { - auto query_cache_writer = std::make_shared(query_cache->createWriter( - key, - std::chrono::milliseconds(settings.query_cache_min_query_duration.totalMilliseconds()), - settings.query_cache_squash_partial_results, - settings.max_block_size, - settings.query_cache_max_size_in_bytes, - settings.query_cache_max_entries)); - res.pipeline.writeResultIntoQueryCache(query_cache_writer); + if (context->isGlobalContext()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Global context cannot create transactions"); + + execute_implicit_tcl_query(context, ASTTransactionControl::BEGIN); + } + catch (Exception & e) + { + e.addMessage("while starting a transaction with 'implicit_transaction'"); + throw; } } + interpreter = InterpreterFactory::get(ast, context, SelectQueryOptions(stage).setInternal(internal)); + + const auto & query_settings = context->getSettingsRef(); + if (context->getCurrentTransaction() && query_settings.throw_on_unsupported_query_inside_transaction) + { + if (!interpreter->supportsTransactions()) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Transactions are not supported for this type of query ({})", ast->getID()); + + } + + if (!interpreter->ignoreQuota() && !quota_checked) + { + quota = context->getQuota(); + if (quota) + { + if (ast->as() || ast->as()) + { + quota->used(QuotaType::QUERY_SELECTS, 1); + } + else if (ast->as()) + { + quota->used(QuotaType::QUERY_INSERTS, 1); + } + quota->used(QuotaType::QUERIES, 1); + quota->checkExceeded(QuotaType::ERRORS); + } + } + + if (!interpreter->ignoreLimits()) + { + limits.mode = LimitsMode::LIMITS_CURRENT; + limits.size_limits = SizeLimits(settings.max_result_rows, settings.max_result_bytes, settings.result_overflow_mode); + } + + if (auto * insert_interpreter = typeid_cast(&*interpreter)) + { + /// Save insertion table (not table function). TODO: support remote() table function. + auto table_id = insert_interpreter->getDatabaseTable(); + if (!table_id.empty()) + context->setInsertionTable(std::move(table_id)); + + if (insert_data_buffer_holder) + insert_interpreter->addBuffer(std::move(insert_data_buffer_holder)); + } + + { + std::unique_ptr span; + if (OpenTelemetry::CurrentContext().isTraceEnabled()) + { + auto * raw_interpreter_ptr = interpreter.get(); + String class_name(demangle(typeid(*raw_interpreter_ptr).name())); + span = std::make_unique(class_name + "::execute()"); + } + + res = interpreter->execute(); + + /// If it is a non-internal SELECT query, and active/write use of the query cache is enabled, then add a processor on + /// top of the pipeline which stores the result in the query cache. 
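One detail of the write path worth spelling out: a result is only buffered into the cache once the same query has run more than query_cache_min_query_runs times. A toy sketch of that thresholding (an assumed simplification; the real writer additionally checks durations, sizes, and entry counts):

#include <cassert>
#include <string>
#include <unordered_map>

std::unordered_map<std::string, size_t> run_counts;

/// Mirrors recordQueryRun() plus the min-runs check: only queries that have run
/// more than `min_query_runs` times are worth writing into the cache.
bool shouldWriteIntoCache(const std::string & query, size_t min_query_runs)
{
    size_t num_runs = ++run_counts[query];
    return num_runs > min_query_runs;
}

int main()
{
    assert(!shouldWriteIntoCache("SELECT 1", 2));   // first run: skip
    assert(!shouldWriteIntoCache("SELECT 1", 2));   // second run: skip
    assert(shouldWriteIntoCache("SELECT 1", 2));    // third run: start caching
    return 0;
}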
+ if (can_use_query_cache && settings.enable_writes_to_query_cache + && (!astContainsNonDeterministicFunctions(ast, context) || settings.query_cache_store_results_of_queries_with_nondeterministic_functions)) + { + QueryCache::Key key( + ast, res.pipeline.getHeader(), + context->getUserName(), settings.query_cache_share_between_users, + std::chrono::system_clock::now() + std::chrono::seconds(settings.query_cache_ttl), + settings.query_cache_compress_entries); + + const size_t num_query_runs = query_cache->recordQueryRun(key); + if (num_query_runs > settings.query_cache_min_query_runs) + { + auto query_cache_writer = std::make_shared(query_cache->createWriter( + key, + std::chrono::milliseconds(settings.query_cache_min_query_duration.totalMilliseconds()), + settings.query_cache_squash_partial_results, + settings.max_block_size, + settings.query_cache_max_size_in_bytes, + settings.query_cache_max_entries)); + res.pipeline.writeResultIntoQueryCache(query_cache_writer); + write_into_query_cache = true; + } + } + + } } } @@ -930,9 +924,7 @@ static std::tuple executeQueryImpl( auto finish_callback = [elem, context, ast, - my_can_use_query_cache = can_use_query_cache, - enable_writes_to_query_cache = settings.enable_writes_to_query_cache, - query_cache_store_results_of_queries_with_nondeterministic_functions = settings.query_cache_store_results_of_queries_with_nondeterministic_functions, + write_into_query_cache, log_queries, log_queries_min_type = settings.log_queries_min_type, log_queries_min_query_duration_ms = settings.log_queries_min_query_duration_ms.totalMilliseconds(), @@ -943,16 +935,10 @@ static std::tuple executeQueryImpl( pulling_pipeline = pipeline.pulling(), query_span](QueryPipeline & query_pipeline) mutable { - /// If active (write) use of the query cache is enabled and the query is eligible for result caching, then store the query - /// result buffered in the special-purpose cache processor (added on top of the pipeline) into the cache. - auto query_cache = context->getQueryCache(); - if (query_cache != nullptr - && pulling_pipeline - && my_can_use_query_cache && enable_writes_to_query_cache - && (!astContainsNonDeterministicFunctions(ast, context) || query_cache_store_results_of_queries_with_nondeterministic_functions)) - { + if (write_into_query_cache) + /// Trigger the actual write of the buffered query result into the query cache. This is done explicitly to prevent + /// partial/garbage results in case of exceptions during query execution. query_pipeline.finalizeWriteInQueryCache(); - } QueryStatusPtr process_list_elem = context->getProcessListElement(); diff --git a/src/Parsers/ASTQueryWithOutput.cpp b/src/Parsers/ASTQueryWithOutput.cpp index 5f717715a69..4bf1e6cb231 100644 --- a/src/Parsers/ASTQueryWithOutput.cpp +++ b/src/Parsers/ASTQueryWithOutput.cpp @@ -39,6 +39,8 @@ void ASTQueryWithOutput::formatImpl(const FormatSettings & s, FormatState & stat s.ostr << (s.hilite ? hilite_keyword : ""); if (is_outfile_append) s.ostr << " APPEND"; + if (is_outfile_truncate) + s.ostr << " TRUNCATE"; if (is_into_outfile_with_stdout) s.ostr << " AND STDOUT"; s.ostr << (s.hilite ? 
hilite_none : ""); diff --git a/src/Parsers/ASTQueryWithOutput.h b/src/Parsers/ASTQueryWithOutput.h index 7db021405e7..6f9cafc89a9 100644 --- a/src/Parsers/ASTQueryWithOutput.h +++ b/src/Parsers/ASTQueryWithOutput.h @@ -17,6 +17,7 @@ public: ASTPtr out_file; bool is_into_outfile_with_stdout = false; bool is_outfile_append = false; + bool is_outfile_truncate = false; ASTPtr format; ASTPtr settings_ast; ASTPtr compression; diff --git a/src/Parsers/ParserQueryWithOutput.cpp b/src/Parsers/ParserQueryWithOutput.cpp index 6796f4528c4..2bfe7353be4 100644 --- a/src/Parsers/ParserQueryWithOutput.cpp +++ b/src/Parsers/ParserQueryWithOutput.cpp @@ -109,6 +109,12 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query_with_output.is_outfile_append = true; } + ParserKeyword s_truncate("TRUNCATE"); + if (s_truncate.ignore(pos, expected)) + { + query_with_output.is_outfile_truncate = true; + } + ParserKeyword s_stdout("AND STDOUT"); if (s_stdout.ignore(pos, expected)) { diff --git a/src/Planner/CollectSets.cpp b/src/Planner/CollectSets.cpp index eb2b02c7ccb..8dd7c6637bf 100644 --- a/src/Planner/CollectSets.cpp +++ b/src/Planner/CollectSets.cpp @@ -8,9 +8,13 @@ #include #include #include +#include #include #include #include +#include +#include +#include namespace DB { @@ -41,11 +45,7 @@ public: auto in_second_argument_node_type = in_second_argument->getNodeType(); const auto & settings = planner_context.getQueryContext()->getSettingsRef(); - - String set_key = planner_context.createSetKey(in_second_argument); - - if (planner_context.hasSet(set_key)) - return; + auto & sets = planner_context.getPreparedSets(); /// Tables and table functions are replaced with subquery at Analysis stage, except special Set table. auto * second_argument_table = in_second_argument->as(); @@ -54,23 +54,69 @@ public: if (storage_set) { /// Handle storage_set as ready set. 
- planner_context.registerSet(set_key, PlannerSet(FutureSet(storage_set->getSet()))); + auto set_key = in_second_argument->getTreeHash(); + sets.addFromStorage(set_key, storage_set->getSet()); } else if (const auto * constant_node = in_second_argument->as()) { - auto set = makeSetForConstantValue( + auto set = getSetElementsForConstantValue( in_first_argument->getResultType(), constant_node->getValue(), constant_node->getResultType(), - settings); + settings.transform_null_in); - planner_context.registerSet(set_key, PlannerSet(FutureSet(std::move(set)))); + DataTypes set_element_types = {in_first_argument->getResultType()}; + const auto * left_tuple_type = typeid_cast(set_element_types.front().get()); + if (left_tuple_type && left_tuple_type->getElements().size() != 1) + set_element_types = left_tuple_type->getElements(); + + set_element_types = Set::getElementTypes(std::move(set_element_types), settings.transform_null_in); + auto set_key = in_second_argument->getTreeHash(); + + if (sets.findTuple(set_key, set_element_types)) + return; + + sets.addFromTuple(set_key, std::move(set), settings); } else if (in_second_argument_node_type == QueryTreeNodeType::QUERY || in_second_argument_node_type == QueryTreeNodeType::UNION || in_second_argument_node_type == QueryTreeNodeType::TABLE) { - planner_context.registerSet(set_key, PlannerSet(in_second_argument)); + auto set_key = in_second_argument->getTreeHash(); + if (sets.findSubquery(set_key)) + return; + + auto subquery_to_execute = in_second_argument; + + if (auto * table_node = in_second_argument->as()) + { + auto storage_snapshot = table_node->getStorageSnapshot(); + auto columns_to_select = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::Ordinary)); + + size_t columns_to_select_size = columns_to_select.size(); + + auto column_nodes_to_select = std::make_shared(); + column_nodes_to_select->getNodes().reserve(columns_to_select_size); + + NamesAndTypes projection_columns; + projection_columns.reserve(columns_to_select_size); + + for (auto & column : columns_to_select) + { + column_nodes_to_select->getNodes().emplace_back(std::make_shared(column, subquery_to_execute)); + projection_columns.emplace_back(column.name, column.type); + } + + auto subquery_for_table = std::make_shared(Context::createCopy(planner_context.getQueryContext())); + subquery_for_table->setIsSubquery(true); + subquery_for_table->getProjectionNode() = std::move(column_nodes_to_select); + subquery_for_table->getJoinTree() = std::move(subquery_to_execute); + subquery_for_table->resolveProjectionColumns(std::move(projection_columns)); + + subquery_to_execute = std::move(subquery_for_table); + } + + sets.addFromSubquery(set_key, std::move(subquery_to_execute), settings); } else { diff --git a/src/Planner/CollectSets.h b/src/Planner/CollectSets.h index 94f792e877b..5f9f7a5a466 100644 --- a/src/Planner/CollectSets.h +++ b/src/Planner/CollectSets.h @@ -7,6 +7,8 @@ namespace DB { +struct SelectQueryOptions; + /** Collect prepared sets and sets for subqueries that are necessary to execute IN function and its variations. * Collected sets are registered in planner context. 
*/ diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 5abf3ec5a80..9f6c22f90f3 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -3,11 +3,13 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -894,79 +896,72 @@ void addOffsetStep(QueryPlan & query_plan, const QueryAnalysisResult & query_ana query_plan.addStep(std::move(offsets_step)); } -void addBuildSubqueriesForSetsStepIfNeeded(QueryPlan & query_plan, +void collectSetsFromActionsDAG(const ActionsDAGPtr & dag, std::unordered_set & useful_sets) +{ + for (const auto & node : dag->getNodes()) + { + if (node.column) + { + const IColumn * column = node.column.get(); + if (const auto * column_const = typeid_cast(column)) + column = &column_const->getDataColumn(); + + if (const auto * column_set = typeid_cast(column)) + useful_sets.insert(column_set->getData().get()); + } + + if (node.type == ActionsDAG::ActionType::FUNCTION && node.function_base->getName() == "indexHint") + { + ActionsDAG::NodeRawConstPtrs children; + if (const auto * adaptor = typeid_cast(node.function_base.get())) + { + if (const auto * index_hint = typeid_cast(adaptor->getFunction().get())) + { + collectSetsFromActionsDAG(index_hint->getActions(), useful_sets); + } + } + } + } +} + +void addBuildSubqueriesForSetsStepIfNeeded( + QueryPlan & query_plan, const SelectQueryOptions & select_query_options, const PlannerContextPtr & planner_context, const std::vector & result_actions_to_execute) { - PreparedSets::SubqueriesForSets subqueries_for_sets; + auto subqueries = planner_context->getPreparedSets().getSubqueries(); + std::unordered_set useful_sets; for (const auto & actions_to_execute : result_actions_to_execute) + collectSetsFromActionsDAG(actions_to_execute, useful_sets); + + auto predicate = [&useful_sets](const auto & set) { return !useful_sets.contains(set.get()); }; + auto it = std::remove_if(subqueries.begin(), subqueries.end(), std::move(predicate)); + subqueries.erase(it, subqueries.end()); + + for (auto & subquery : subqueries) { - for (const auto & node : actions_to_execute->getNodes()) - { - const auto & set_key = node.result_name; - auto * planner_set = planner_context->getSetOrNull(set_key); - if (!planner_set) - continue; + auto query_tree = subquery->detachQueryTree(); + auto subquery_options = select_query_options.subquery(); + Planner subquery_planner( + query_tree, + subquery_options, + planner_context->getGlobalPlannerContext()); + subquery_planner.buildQueryPlanIfNeeded(); - auto subquery_to_execute = planner_set->getSubqueryNode(); - - if (planner_set->getSet().isCreated() || !subquery_to_execute) - continue; - - if (auto * table_node = subquery_to_execute->as()) - { - auto storage_snapshot = table_node->getStorageSnapshot(); - auto columns_to_select = storage_snapshot->getColumns(GetColumnsOptions(GetColumnsOptions::Ordinary)); - - size_t columns_to_select_size = columns_to_select.size(); - - auto column_nodes_to_select = std::make_shared(); - column_nodes_to_select->getNodes().reserve(columns_to_select_size); - - NamesAndTypes projection_columns; - projection_columns.reserve(columns_to_select_size); - - for (auto & column : columns_to_select) - { - column_nodes_to_select->getNodes().emplace_back(std::make_shared(column, subquery_to_execute)); - projection_columns.emplace_back(column.name, column.type); - } - - auto subquery_for_table = std::make_shared(Context::createCopy(planner_context->getQueryContext())); - subquery_for_table->setIsSubquery(true); - 
subquery_for_table->getProjectionNode() = std::move(column_nodes_to_select); - subquery_for_table->getJoinTree() = std::move(subquery_to_execute); - subquery_for_table->resolveProjectionColumns(std::move(projection_columns)); - - subquery_to_execute = std::move(subquery_for_table); - } - - auto subquery_options = select_query_options.subquery(); - Planner subquery_planner( - subquery_to_execute, - subquery_options, - planner_context->getGlobalPlannerContext()); - subquery_planner.buildQueryPlanIfNeeded(); - - const auto & settings = planner_context->getQueryContext()->getSettingsRef(); - SizeLimits size_limits_for_set = {settings.max_rows_in_set, settings.max_bytes_in_set, settings.set_overflow_mode}; - bool tranform_null_in = settings.transform_null_in; - auto set = std::make_shared(size_limits_for_set, false /*fill_set_elements*/, tranform_null_in); - - SubqueryForSet subquery_for_set; - subquery_for_set.key = set_key; - subquery_for_set.set_in_progress = set; - subquery_for_set.set = planner_set->getSet(); - subquery_for_set.promise_to_fill_set = planner_set->extractPromiseToBuildSet(); - subquery_for_set.source = std::make_unique(std::move(subquery_planner).extractQueryPlan()); - - subqueries_for_sets.emplace(set_key, std::move(subquery_for_set)); - } + subquery->setQueryPlan(std::make_unique(std::move(subquery_planner).extractQueryPlan())); } - addCreatingSetsStep(query_plan, std::move(subqueries_for_sets), planner_context->getQueryContext()); + if (!subqueries.empty()) + { + auto step = std::make_unique( + query_plan.getCurrentDataStream(), + std::move(subqueries), + planner_context->getQueryContext()); + + query_plan.addStep(std::move(step)); + } } /// Support for `additional_result_filter` setting diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index c64d82299ca..7575828e64d 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -16,6 +16,8 @@ #include #include +#include +#include #include #include @@ -623,33 +625,67 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::makeSetForInFunction(const QueryTreeNodePtr & node) { const auto & function_node = node->as(); + auto in_first_argument = function_node.getArguments().getNodes().at(0); auto in_second_argument = function_node.getArguments().getNodes().at(1); - auto set_key = planner_context->createSetKey(in_second_argument); - const auto & planner_set = planner_context->getSetOrThrow(set_key); + //auto set_key = planner_context->createSetKey(in_second_argument); + + DataTypes set_element_types; + + auto in_second_argument_node_type = in_second_argument->getNodeType(); + + bool subquery_or_table = + in_second_argument_node_type == QueryTreeNodeType::QUERY || + in_second_argument_node_type == QueryTreeNodeType::UNION || + in_second_argument_node_type == QueryTreeNodeType::TABLE; + + FutureSetPtr set; + auto set_key = in_second_argument->getTreeHash(); + + if (!subquery_or_table) + { + set_element_types = {in_first_argument->getResultType()}; + const auto * left_tuple_type = typeid_cast(set_element_types.front().get()); + if (left_tuple_type && left_tuple_type->getElements().size() != 1) + set_element_types = left_tuple_type->getElements(); + + set_element_types = Set::getElementTypes(std::move(set_element_types), planner_context->getQueryContext()->getSettingsRef().transform_null_in); + set = 
planner_context->getPreparedSets().findTuple(set_key, set_element_types); + } + else + { + set = planner_context->getPreparedSets().findSubquery(set_key); + if (!set) + set = planner_context->getPreparedSets().findStorage(set_key); + } + + if (!set) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "No set is registered for key {}", + PreparedSets::toString(set_key, set_element_types)); ColumnWithTypeAndName column; - column.name = set_key; + column.name = planner_context->createSetKey(in_second_argument); column.type = std::make_shared(); - bool set_is_created = planner_set.getSet().isCreated(); - auto column_set = ColumnSet::create(1, planner_set.getSet()); + bool set_is_created = set->get() != nullptr; + auto column_set = ColumnSet::create(1, std::move(set)); if (set_is_created) column.column = ColumnConst::create(std::move(column_set), 1); else column.column = std::move(column_set); - actions_stack[0].addConstantIfNecessary(set_key, column); + actions_stack[0].addConstantIfNecessary(column.name, column); size_t actions_stack_size = actions_stack.size(); for (size_t i = 1; i < actions_stack_size; ++i) { auto & actions_stack_node = actions_stack[i]; - actions_stack_node.addInputConstantColumnIfNecessary(set_key, column); + actions_stack_node.addInputConstantColumnIfNecessary(column.name, column); } - return {set_key, 0}; + return {column.name, 0}; } PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::visitIndexHintFunction(const QueryTreeNodePtr & node) diff --git a/src/Planner/PlannerContext.cpp b/src/Planner/PlannerContext.cpp index 708dab04d02..3c75d4fbea8 100644 --- a/src/Planner/PlannerContext.cpp +++ b/src/Planner/PlannerContext.cpp @@ -118,50 +118,4 @@ PlannerContext::SetKey PlannerContext::createSetKey(const QueryTreeNodePtr & set return "__set_" + toString(set_source_hash.first) + '_' + toString(set_source_hash.second); } -void PlannerContext::registerSet(const SetKey & key, PlannerSet planner_set) -{ - if (!planner_set.getSet().isValid()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Set must be initialized"); - - const auto & subquery_node = planner_set.getSubqueryNode(); - if (subquery_node) - { - auto node_type = subquery_node->getNodeType(); - - if (node_type != QueryTreeNodeType::QUERY && - node_type != QueryTreeNodeType::UNION && - node_type != QueryTreeNodeType::TABLE) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Invalid node for set table expression. Expected query or union. Actual {}", - subquery_node->formatASTForErrorMessage()); - } - - set_key_to_set.emplace(key, std::move(planner_set)); -} - -bool PlannerContext::hasSet(const SetKey & key) const -{ - return set_key_to_set.contains(key); -} - -const PlannerSet & PlannerContext::getSetOrThrow(const SetKey & key) const -{ - auto it = set_key_to_set.find(key); - if (it == set_key_to_set.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "No set is registered for key {}", - key); - - return it->second; -} - -PlannerSet * PlannerContext::getSetOrNull(const SetKey & key) -{ - auto it = set_key_to_set.find(key); - if (it == set_key_to_set.end()) - return nullptr; - - return &it->second; -} - } diff --git a/src/Planner/PlannerContext.h b/src/Planner/PlannerContext.h index 4199c863033..aceb313d4b5 100644 --- a/src/Planner/PlannerContext.h +++ b/src/Planner/PlannerContext.h @@ -44,52 +44,6 @@ private: using GlobalPlannerContextPtr = std::shared_ptr; -/** PlannerSet is wrapper around Set that is used during query planning. 
- * - * If subquery node is null, such set is already prepared for execution. - * - * If subquery node is not null, then set must be build from the result of the subquery. - * If subquery node is not null, it must have QUERY or UNION type. - */ -class PlannerSet -{ -public: - /// Construct planner set that is ready for execution - explicit PlannerSet(FutureSet set_) - : set(std::move(set_)) - {} - - /// Construct planner set with set and subquery node - explicit PlannerSet(QueryTreeNodePtr subquery_node_) - : set(promise_to_build_set.get_future()) - , subquery_node(std::move(subquery_node_)) - {} - - /// Get a reference to a set that might be not built yet - const FutureSet & getSet() const - { - return set; - } - - /// Get subquery node - const QueryTreeNodePtr & getSubqueryNode() const - { - return subquery_node; - } - - /// This promise will be fulfilled when set is built and all FutureSet objects will become ready - std::promise extractPromiseToBuildSet() - { - return std::move(promise_to_build_set); - } - -private: - std::promise promise_to_build_set; - FutureSet set; - - QueryTreeNodePtr subquery_node; -}; - class PlannerContext { public: @@ -177,28 +131,10 @@ public: using SetKey = std::string; - using SetKeyToSet = std::unordered_map; - /// Create set key for set source node static SetKey createSetKey(const QueryTreeNodePtr & set_source_node); - /// Register set for set key - void registerSet(const SetKey & key, PlannerSet planner_set); - - /// Returns true if set is registered for key, false otherwise - bool hasSet(const SetKey & key) const; - - /// Get set for key, if no set is registered logical exception is thrown - const PlannerSet & getSetOrThrow(const SetKey & key) const; - - /// Get set for key, if no set is registered null is returned - PlannerSet * getSetOrNull(const SetKey & key); - - /// Get registered sets - const SetKeyToSet & getRegisteredSets() const - { - return set_key_to_set; - } + PreparedSets & getPreparedSets() { return prepared_sets; } private: /// Query context @@ -214,8 +150,7 @@ private: std::unordered_map table_expression_node_to_data; /// Set key to set - SetKeyToSet set_key_to_set; - + PreparedSets prepared_sets; }; using PlannerContextPtr = std::shared_ptr; diff --git a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp index fe362f64b96..c54d32c1385 100644 --- a/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp +++ b/src/Processors/QueryPlan/CreateSetAndFilterOnTheFlyStep.cpp @@ -105,7 +105,7 @@ CreateSetAndFilterOnTheFlyStep::CreateSetAndFilterOnTheFlyStep( : ITransformingStep(input_stream_, input_stream_.header, getTraits()) , column_names(column_names_) , max_rows_in_set(max_rows_in_set_) - , own_set(std::make_shared(SizeLimits(max_rows_in_set, 0, OverflowMode::BREAK), false, true)) + , own_set(std::make_shared(SizeLimits(max_rows_in_set, 0, OverflowMode::BREAK), 0, true)) , filtering_set(nullptr) , crosswise_connection(crosswise_connection_) , position(position_) diff --git a/src/Processors/QueryPlan/CreatingSetsStep.cpp b/src/Processors/QueryPlan/CreatingSetsStep.cpp index 459092c88ad..3e4dfb0c7d1 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.cpp +++ b/src/Processors/QueryPlan/CreatingSetsStep.cpp @@ -1,6 +1,8 @@ #include #include #include +//#include +#include #include #include #include @@ -34,21 +36,21 @@ static ITransformingStep::Traits getTraits() CreatingSetStep::CreatingSetStep( const DataStream & input_stream_, - String description_, - SubqueryForSet 
subquery_for_set_, + SetAndKeyPtr set_and_key_, + StoragePtr external_table_, SizeLimits network_transfer_limits_, ContextPtr context_) : ITransformingStep(input_stream_, Block{}, getTraits()) - , WithContext(context_) - , description(std::move(description_)) - , subquery_for_set(std::move(subquery_for_set_)) + , set_and_key(std::move(set_and_key_)) + , external_table(std::move(external_table_)) , network_transfer_limits(std::move(network_transfer_limits_)) + , context(std::move(context_)) { } void CreatingSetStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { - pipeline.addCreatingSetsTransform(getOutputStream().header, std::move(subquery_for_set), network_transfer_limits, getContext()); + pipeline.addCreatingSetsTransform(getOutputStream().header, std::move(set_and_key), std::move(external_table), network_transfer_limits, context->getPreparedSetsCache()); } void CreatingSetStep::updateOutputStream() @@ -61,16 +63,16 @@ void CreatingSetStep::describeActions(FormatSettings & settings) const String prefix(settings.offset, ' '); settings.out << prefix; - if (subquery_for_set.set_in_progress) + if (set_and_key->set) settings.out << "Set: "; - settings.out << description << '\n'; + settings.out << set_and_key->key << '\n'; } void CreatingSetStep::describeActions(JSONBuilder::JSONMap & map) const { - if (subquery_for_set.set_in_progress) - map.add("Set", description); + if (set_and_key->set) + map.add("Set", set_and_key->key); } @@ -122,7 +124,7 @@ void CreatingSetsStep::describePipeline(FormatSettings & settings) const IQueryPlanStep::describePipeline(processors, settings); } -void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets subqueries_for_sets, ContextPtr context) +void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::Subqueries subqueries, ContextPtr context) { DataStreams input_streams; input_streams.emplace_back(query_plan.getCurrentDataStream()); @@ -131,26 +133,14 @@ void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets plans.emplace_back(std::make_unique(std::move(query_plan))); query_plan = QueryPlan(); - for (auto & [description, subquery_for_set] : subqueries_for_sets) + for (auto & future_set : subqueries) { - if (!subquery_for_set.hasSource()) - { - subquery_for_set.promise_to_fill_set.set_exception(std::make_exception_ptr( - Exception(ErrorCodes::LOGICAL_ERROR, "Subquery for set {} has no source", subquery_for_set.key))); + if (future_set->get()) continue; - } - auto plan = subquery_for_set.detachSource(); - - const Settings & settings = context->getSettingsRef(); - auto creating_set = std::make_unique( - plan->getCurrentDataStream(), - description, - std::move(subquery_for_set), - SizeLimits(settings.max_rows_to_transfer, settings.max_bytes_to_transfer, settings.transfer_overflow_mode), - context); - creating_set->setStepDescription("Create set for subquery"); - plan->addStep(std::move(creating_set)); + auto plan = future_set->build(context); + if (!plan) + continue; input_streams.emplace_back(plan->getCurrentDataStream()); plans.emplace_back(std::move(plan)); @@ -167,12 +157,52 @@ void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets query_plan.unitePlans(std::move(creating_sets), std::move(plans)); } +std::vector> DelayedCreatingSetsStep::makePlansForSets(DelayedCreatingSetsStep && step) +{ + std::vector> plans; + + for (auto & future_set : step.subqueries) + { + if (future_set->get()) + continue; + + auto plan = 
future_set->build(step.context); + if (!plan) + continue; + + plan->optimize(QueryPlanOptimizationSettings::fromContext(step.context)); + + plans.emplace_back(std::move(plan)); + } + + return plans; +} + void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, ContextPtr context) { - if (!prepared_sets || prepared_sets->empty()) + if (!prepared_sets) return; - addCreatingSetsStep(query_plan, prepared_sets->detachSubqueries(), context); + auto subqueries = prepared_sets->getSubqueries(); + if (subqueries.empty()) + return; + + addCreatingSetsStep(query_plan, std::move(subqueries), context); +} + +DelayedCreatingSetsStep::DelayedCreatingSetsStep( + DataStream input_stream, PreparedSets::Subqueries subqueries_, ContextPtr context_) + : subqueries(std::move(subqueries_)), context(std::move(context_)) +{ + input_streams = {input_stream}; + output_stream = std::move(input_stream); +} + +QueryPipelineBuilderPtr DelayedCreatingSetsStep::updatePipeline(QueryPipelineBuilders, const BuildQueryPipelineSettings &) +{ + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Cannot build pipeline in DelayedCreatingSets. This step should be optimized out."); } } diff --git a/src/Processors/QueryPlan/CreatingSetsStep.h b/src/Processors/QueryPlan/CreatingSetsStep.h index b4777578a30..a90b70a2fa4 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.h +++ b/src/Processors/QueryPlan/CreatingSetsStep.h @@ -9,15 +9,15 @@ namespace DB { /// Creates sets for subqueries and JOIN. See CreatingSetsTransform. -class CreatingSetStep : public ITransformingStep, WithContext +class CreatingSetStep : public ITransformingStep { public: CreatingSetStep( - const DataStream & input_stream_, - String description_, - SubqueryForSet subquery_for_set_, - SizeLimits network_transfer_limits_, - ContextPtr context_); + const DataStream & input_stream_, + SetAndKeyPtr set_and_key_, + StoragePtr external_table_, + SizeLimits network_transfer_limits_, + ContextPtr context_); String getName() const override { return "CreatingSet"; } @@ -29,9 +29,10 @@ public: private: void updateOutputStream() override; - String description; - SubqueryForSet subquery_for_set; + SetAndKeyPtr set_and_key; + StoragePtr external_table; SizeLimits network_transfer_limits; + ContextPtr context; }; class CreatingSetsStep : public IQueryPlanStep @@ -46,7 +47,28 @@ public: void describePipeline(FormatSettings & settings) const override; }; -void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::SubqueriesForSets subqueries_for_sets, ContextPtr context); +/// This is a temporary step which is converted to CreatingSetStep after plan optimization. +/// Can't be used by itself. 
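Before the declaration, a minimal sketch of this placeholder pattern under toy types: the delayed step refuses to produce a pipeline, and an optimizer pass has to swap it for a real step, loosely mirroring what addPlansForSets does. Everything named Toy* is invented for illustration.

#include <cassert>
#include <memory>
#include <stdexcept>

struct ToyStep { virtual ~ToyStep() = default; virtual int run() = 0; };

/// Placeholder analogous to DelayedCreatingSetsStep: it must be replaced during
/// optimization and therefore refuses to build a pipeline itself.
struct ToyDelayedStep final : ToyStep
{
    int run() override { throw std::logic_error("should have been optimized out"); }
};

struct ToyReadyStep final : ToyStep
{
    int run() override { return 42; }
};

/// Stands in for the optimization pass: swap the placeholder for a real step.
void optimize(std::unique_ptr<ToyStep> & step)
{
    if (dynamic_cast<ToyDelayedStep *>(step.get()))
        step = std::make_unique<ToyReadyStep>();
}

int main()
{
    std::unique_ptr<ToyStep> step = std::make_unique<ToyDelayedStep>();
    optimize(step);                 // without this pass, run() would throw
    assert(step->run() == 42);
    return 0;
}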
+class DelayedCreatingSetsStep final : public IQueryPlanStep +{ +public: + DelayedCreatingSetsStep(DataStream input_stream, PreparedSets::Subqueries subqueries_, ContextPtr context_); + + String getName() const override { return "DelayedCreatingSets"; } + + QueryPipelineBuilderPtr updatePipeline(QueryPipelineBuilders, const BuildQueryPipelineSettings &) override; + + static std::vector> makePlansForSets(DelayedCreatingSetsStep && step); + + ContextPtr getContext() const { return context; } + PreparedSets::Subqueries detachSets() { return std::move(subqueries); } + +private: + PreparedSets::Subqueries subqueries; + ContextPtr context; +}; + +void addCreatingSetsStep(QueryPlan & query_plan, PreparedSets::Subqueries subqueries, ContextPtr context); void addCreatingSetsStep(QueryPlan & query_plan, PreparedSetsPtr prepared_sets, ContextPtr context); diff --git a/src/Processors/QueryPlan/DistributedCreateLocalPlan.h b/src/Processors/QueryPlan/DistributedCreateLocalPlan.h index 1afdc07fa4d..c08b9bdf67e 100644 --- a/src/Processors/QueryPlan/DistributedCreateLocalPlan.h +++ b/src/Processors/QueryPlan/DistributedCreateLocalPlan.h @@ -10,6 +10,9 @@ namespace DB { +class PreparedSets; +using PreparedSetsPtr = std::shared_ptr; + std::unique_ptr createLocalPlan( const ASTPtr & query_ast, const Block & header, diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h index de1d43bed1b..2b934ec440b 100644 --- a/src/Processors/QueryPlan/Optimizations/Optimizations.h +++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h @@ -110,6 +110,7 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes); void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &); bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & nodes); bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes); +bool addPlansForSets(QueryPlan::Node & node, QueryPlan::Nodes & nodes); /// Enable memory bound merging of aggregation states for remote queries /// in case it was enabled for local plan diff --git a/src/Processors/QueryPlan/Optimizations/addPlansForSets.cpp b/src/Processors/QueryPlan/Optimizations/addPlansForSets.cpp new file mode 100644 index 00000000000..e9100ae9d02 --- /dev/null +++ b/src/Processors/QueryPlan/Optimizations/addPlansForSets.cpp @@ -0,0 +1,35 @@ +#include +#include +#include +#include + +namespace DB::QueryPlanOptimizations +{ + +bool addPlansForSets(QueryPlan::Node & node, QueryPlan::Nodes & nodes) +{ + auto * delayed = typeid_cast(node.step.get()); + if (!delayed) + return false; + + auto plans = DelayedCreatingSetsStep::makePlansForSets(std::move(*delayed)); + node.children.reserve(1 + plans.size()); + + DataStreams input_streams; + input_streams.reserve(1 + plans.size()); + input_streams.push_back(node.children.front()->step->getOutputStream()); + + for (const auto & plan : plans) + { + input_streams.push_back(plan->getCurrentDataStream()); + node.children.push_back(plan->getRootNode()); + nodes.splice(nodes.end(), QueryPlan::detachNodes(std::move(*plan))); + } + + auto creating_sets = std::make_unique(std::move(input_streams)); + creating_sets->setStepDescription("Create sets before main query execution"); + node.step = std::move(creating_sets); + return true; +} + +} diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index db29038999b..4336de41b7b 100644 --- 
a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -268,6 +268,19 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes return 2; } + if (auto * delayed = typeid_cast(child.get())) + { + /// CreatingSets does not change header. + /// We can push down filter and update header. + /// Filter - DelayedCreatingSets - Something + child = std::make_unique(filter->getOutputStream(), delayed->detachSets(), delayed->getContext()); + std::swap(parent, child); + std::swap(parent_node->children, child_node->children); + std::swap(parent_node->children.front(), child_node->children.front()); + /// DelayedCreatingSets - Filter - Something + return 2; + } + if (auto * totals_having = typeid_cast(child.get())) { /// If totals step has HAVING expression, skip it for now. diff --git a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp index e98386a6ee9..5ef786ff975 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizePrimaryKeyCondition.cpp @@ -28,6 +28,8 @@ void optimizePrimaryKeyCondition(const Stack & stack) else break; } + + source_step_with_filter->onAddFilterFinish(); } } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index 73632d34671..d620170e29c 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -167,6 +167,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s optimizePrewhere(stack, nodes); optimizePrimaryKeyCondition(stack); enableMemoryBoundMerging(*stack.back().node, nodes); + addPlansForSets(*stack.back().node, nodes); stack.pop_back(); } diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index 3fbe3d89845..cb732e58855 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -541,4 +541,9 @@ void QueryPlan::explainEstimate(MutableColumns & columns) } } +QueryPlan::Nodes QueryPlan::detachNodes(QueryPlan && plan) +{ + return std::move(plan.nodes); +} + } diff --git a/src/Processors/QueryPlan/QueryPlan.h b/src/Processors/QueryPlan/QueryPlan.h index 19d87b101de..d89bdc534be 100644 --- a/src/Processors/QueryPlan/QueryPlan.h +++ b/src/Processors/QueryPlan/QueryPlan.h @@ -105,10 +105,11 @@ public: std::vector children = {}; }; - const Node * getRootNode() const { return root; } - using Nodes = std::list; + Node * getRootNode() const { return root; } + static Nodes detachNodes(QueryPlan && plan); + private: QueryPlanResourceHolder resources; Nodes nodes; diff --git a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.cpp b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.cpp index 2080c31d253..188956b34fc 100644 --- a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.cpp +++ b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.cpp @@ -96,11 +96,13 @@ private: }; ReadFromMemoryStorageStep::ReadFromMemoryStorageStep(const Names & columns_to_read_, + StoragePtr storage_, const StorageSnapshotPtr & storage_snapshot_, const size_t num_streams_, const bool delay_read_for_global_sub_queries_) : SourceStepWithFilter(DataStream{.header=storage_snapshot_->getSampleBlockForColumns(columns_to_read_)}), columns_to_read(columns_to_read_), + 
storage(std::move(storage_)), storage_snapshot(storage_snapshot_), num_streams(num_streams_), delay_read_for_global_sub_queries(delay_read_for_global_sub_queries_) @@ -142,9 +144,9 @@ Pipe ReadFromMemoryStorageStep::makePipe() storage_snapshot, nullptr /* data */, nullptr /* parallel execution index */, - [current_data](std::shared_ptr & data_to_initialize) + [my_storage = storage](std::shared_ptr & data_to_initialize) { - data_to_initialize = current_data; + data_to_initialize = assert_cast(*my_storage).data.get(); })); } diff --git a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h index cec523ed58b..1122bfbb2a5 100644 --- a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h +++ b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h @@ -16,6 +16,7 @@ class ReadFromMemoryStorageStep final : public SourceStepWithFilter { public: ReadFromMemoryStorageStep(const Names & columns_to_read_, + StoragePtr storage_, const StorageSnapshotPtr & storage_snapshot_, size_t num_streams_, bool delay_read_for_global_sub_queries_); @@ -35,6 +36,7 @@ private: static constexpr auto name = "ReadFromMemoryStorage"; Names columns_to_read; + StoragePtr storage; StorageSnapshotPtr storage_snapshot; size_t num_streams; bool delay_read_for_global_sub_queries; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3c38ecbbd3f..1e21d13e2b1 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include @@ -100,6 +102,7 @@ namespace ErrorCodes extern const int INDEX_NOT_USED; extern const int LOGICAL_ERROR; extern const int TOO_MANY_ROWS; + extern const int CANNOT_PARSE_TEXT; } static MergeTreeReaderSettings getMergeTreeReaderSettings( @@ -249,7 +252,7 @@ ReadFromMergeTree::ReadFromMergeTree( { /// build sort description for output stream SortDescription sort_description; - const Names & sorting_key_columns = storage_snapshot->getMetadataForQuery()->getSortingKeyColumns(); + const Names & sorting_key_columns = metadata_for_reading->getSortingKeyColumns(); const Block & header = output_stream->header; const int sort_direction = getSortDirection(); for (const auto & column_name : sorting_key_columns) @@ -1122,7 +1125,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( prewhere_info, filter_nodes, storage_snapshot->metadata, - storage_snapshot->getMetadataForQuery(), + metadata_for_reading, query_info, context, requested_num_streams, @@ -1130,7 +1133,179 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( data, real_column_names, sample_factor_column_queried, - log); + log, + indexes); +} + +static ActionsDAGPtr buildFilterDAG( + const ContextPtr & context, + const PrewhereInfoPtr & prewhere_info, + const ActionDAGNodes & added_filter_nodes, + const SelectQueryInfo & query_info) +{ + const auto & settings = context->getSettingsRef(); + ActionsDAG::NodeRawConstPtrs nodes; + + if (prewhere_info) + { + { + const auto & node = prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name); + nodes.push_back(&node); + } + + if (prewhere_info->row_level_filter) + { + const auto & node = prewhere_info->row_level_filter->findInOutputs(prewhere_info->row_level_column_name); + nodes.push_back(&node); + } + } + + for (const auto & node : added_filter_nodes.nodes) + nodes.push_back(node); + + 
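The buildFilterDAG helper being introduced here (it continues below with the analyzer's identifier-to-column mapping) gathers every available predicate, that is the PREWHERE column, the optional row-level filter, and any filter nodes added by the optimizer, and folds them into a single DAG for index analysis. A minimal standalone sketch of that fold, with std::function standing in for ActionsDAG nodes (hypothetical names, not the real API):

    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    using Predicate = std::function<bool(int)>;

    // Conjunction of all collected filter nodes, the analogue of building one
    // filter ActionsDAG out of prewhere, row-level filter and pushed-down filters.
    Predicate foldFilters(std::vector<Predicate> nodes)
    {
        return [nodes = std::move(nodes)](int row)
        {
            for (const auto & node : nodes)
                if (!node(row))
                    return false;
            return true;
        };
    }

    int main()
    {
        std::vector<Predicate> nodes;
        nodes.push_back([](int row) { return row > 10; });     // e.g. the PREWHERE condition
        nodes.push_back([](int row) { return row % 2 == 0; }); // e.g. a pushed-down filter

        Predicate combined = foldFilters(std::move(nodes));
        std::cout << combined(12) << ' ' << combined(13) << '\n'; // prints: 1 0
    }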
std::unordered_map node_name_to_input_node_column; + + if (settings.allow_experimental_analyzer && query_info.planner_context) + { + const auto & table_expression_data = query_info.planner_context->getTableExpressionDataOrThrow(query_info.table_expression); + for (const auto & [column_identifier, column_name] : table_expression_data.getColumnIdentifierToColumnName()) + { + const auto & column = table_expression_data.getColumnOrThrow(column_name); + node_name_to_input_node_column.emplace(column_identifier, ColumnWithTypeAndName(column.type, column_name)); + } + } + + return ActionsDAG::buildFilterActionsDAG(nodes, node_name_to_input_node_column, context); +} + +static void buildIndexes( + std::optional & indexes, + ActionsDAGPtr filter_actions_dag, + const MergeTreeData & data, + const ContextPtr & context, + const SelectQueryInfo & query_info, + const StorageMetadataPtr & metadata_snapshot) +{ + indexes.reset(); + + // Build and check if primary key is used when necessary + const auto & primary_key = metadata_snapshot->getPrimaryKey(); + const Names & primary_key_column_names = primary_key.column_names; + + const auto & settings = context->getSettingsRef(); + if (settings.query_plan_optimize_primary_key) + { + NameSet array_join_name_set; + if (query_info.syntax_analyzer_result) + array_join_name_set = query_info.syntax_analyzer_result->getArrayJoinSourceNameSet(); + + indexes.emplace(ReadFromMergeTree::Indexes{{ + filter_actions_dag, + context, + primary_key_column_names, + primary_key.expression, + array_join_name_set}, {}, {}, {}, false}); + } + else + { + indexes.emplace(ReadFromMergeTree::Indexes{{ + query_info, + context, + primary_key_column_names, + primary_key.expression}, {}, {}, {}, false}); + } + + if (metadata_snapshot->hasPartitionKey()) + { + const auto & partition_key = metadata_snapshot->getPartitionKey(); + auto minmax_columns_names = data.getMinMaxColumnsNames(partition_key); + auto minmax_expression_actions = data.getMinMaxExpr(partition_key, ExpressionActionsSettings::fromContext(context)); + + indexes->minmax_idx_condition.emplace(filter_actions_dag, context, minmax_columns_names, minmax_expression_actions, NameSet()); + indexes->partition_pruner.emplace(metadata_snapshot, filter_actions_dag, context, false /* strict */); + } + + indexes->use_skip_indexes = settings.use_skip_indexes; + bool final = query_info.isFinal(); + + if (final && !settings.use_skip_indexes_if_final) + indexes->use_skip_indexes = false; + + if (!indexes->use_skip_indexes) + return; + + const SelectQueryInfo * info = &query_info; + std::optional info_copy; + if (settings.allow_experimental_analyzer) + { + info_copy.emplace(query_info); + info_copy->filter_actions_dag = filter_actions_dag; + info = &*info_copy; + } + + std::unordered_set ignored_index_names; + + if (settings.ignore_data_skipping_indices.changed) + { + const auto & indices = settings.ignore_data_skipping_indices.toString(); + Tokens tokens(indices.data(), indices.data() + indices.size(), settings.max_query_size); + IParser::Pos pos(tokens, static_cast(settings.max_parser_depth)); + Expected expected; + + /// Use an unordered list rather than string vector + auto parse_single_id_or_literal = [&] + { + String str; + if (!parseIdentifierOrStringLiteral(pos, expected, str)) + return false; + + ignored_index_names.insert(std::move(str)); + return true; + }; + + if (!ParserList::parseUtil(pos, expected, parse_single_id_or_literal, false)) + throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse ignore_data_skipping_indices ('{}')", 
indices); + } + + UsefulSkipIndexes skip_indexes; + using Key = std::pair; + std::map merged; + + for (const auto & index : metadata_snapshot->getSecondaryIndices()) + { + if (!ignored_index_names.contains(index.name)) + { + auto index_helper = MergeTreeIndexFactory::instance().get(index); + if (index_helper->isMergeable()) + { + auto [it, inserted] = merged.emplace(Key{index_helper->index.type, index_helper->getGranularity()}, skip_indexes.merged_indices.size()); + if (inserted) + { + skip_indexes.merged_indices.emplace_back(); + skip_indexes.merged_indices.back().condition = index_helper->createIndexMergedCondition(*info, metadata_snapshot); + } + + skip_indexes.merged_indices[it->second].addIndex(index_helper); + } + else + { + auto condition = index_helper->createIndexCondition(*info, context); + if (!condition->alwaysUnknownOrTrue()) + skip_indexes.useful_indices.emplace_back(index_helper, condition); + } + } + } + + indexes->skip_indexes = std::move(skip_indexes); +} + +void ReadFromMergeTree::onAddFilterFinish() +{ + if (!filter_nodes.nodes.empty()) + { + auto filter_actions_dag = buildFilterDAG(context, prewhere_info, filter_nodes, query_info); + buildIndexes(indexes, filter_actions_dag, data, context, query_info, metadata_for_reading); + } } MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( @@ -1147,44 +1322,14 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log) + Poco::Logger * log, + std::optional & indexes) { const auto & settings = context->getSettingsRef(); if (settings.allow_experimental_analyzer || settings.query_plan_optimize_primary_key) { - ActionsDAG::NodeRawConstPtrs nodes; - - if (prewhere_info) - { - { - const auto & node = prewhere_info->prewhere_actions->findInOutputs(prewhere_info->prewhere_column_name); - nodes.push_back(&node); - } - - if (prewhere_info->row_level_filter) - { - const auto & node = prewhere_info->row_level_filter->findInOutputs(prewhere_info->row_level_column_name); - nodes.push_back(&node); - } - } - - for (const auto & node : added_filter_nodes.nodes) - nodes.push_back(node); - - std::unordered_map node_name_to_input_node_column; - - if (settings.allow_experimental_analyzer && query_info.planner_context) - { - const auto & table_expression_data = query_info.planner_context->getTableExpressionDataOrThrow(query_info.table_expression); - for (const auto & [column_identifier, column_name] : table_expression_data.getColumnIdentifierToColumnName()) - { - const auto & column = table_expression_data.getColumnOrThrow(column_name); - node_name_to_input_node_column.emplace(column_identifier, ColumnWithTypeAndName(column.type, column_name)); - } - } - auto updated_query_info_with_filter_dag = query_info; - updated_query_info_with_filter_dag.filter_actions_dag = ActionsDAG::buildFilterActionsDAG(nodes, node_name_to_input_node_column, context); + updated_query_info_with_filter_dag.filter_actions_dag = buildFilterDAG(context, prewhere_info, added_filter_nodes, query_info); return selectRangesToReadImpl( std::move(parts), @@ -1198,7 +1343,8 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( data, real_column_names, sample_factor_column_queried, - log); + log, + indexes); } return selectRangesToReadImpl( @@ -1213,7 +1359,8 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( data, real_column_names, sample_factor_column_queried, - log); + 
log, + indexes); } MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( @@ -1228,7 +1375,8 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log) + Poco::Logger * log, + std::optional & indexes) { AnalysisResult result; const auto & settings = context->getSettingsRef(); @@ -1249,31 +1397,14 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( result.column_names_to_read.push_back(ExpressionActions::getSmallestColumn(available_real_columns).name); } - // storage_snapshot->check(result.column_names_to_read); - // Build and check if primary key is used when necessary const auto & primary_key = metadata_snapshot->getPrimaryKey(); const Names & primary_key_column_names = primary_key.column_names; - std::optional key_condition; - if (settings.query_plan_optimize_primary_key) - { - NameSet array_join_name_set; - if (query_info.syntax_analyzer_result) - array_join_name_set = query_info.syntax_analyzer_result->getArrayJoinSourceNameSet(); + if (!indexes) + buildIndexes(indexes, query_info.filter_actions_dag, data, context, query_info, metadata_snapshot); - key_condition.emplace(query_info.filter_actions_dag, - context, - primary_key_column_names, - primary_key.expression, - array_join_name_set); - } - else - { - key_condition.emplace(query_info, context, primary_key_column_names, primary_key.expression); - } - - if (settings.force_primary_key && key_condition->alwaysUnknownOrTrue()) + if (settings.force_primary_key && indexes->key_condition.alwaysUnknownOrTrue()) { return std::make_shared(MergeTreeDataSelectAnalysisResult{ .result = std::make_exception_ptr(Exception( @@ -1281,9 +1412,9 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( "Primary key ({}) is not used and setting 'force_primary_key' is set", fmt::join(primary_key_column_names, ", ")))}); } - LOG_DEBUG(log, "Key condition: {}", key_condition->toString()); + LOG_DEBUG(log, "Key condition: {}", indexes->key_condition.toString()); - if (key_condition->alwaysFalse()) + if (indexes->key_condition.alwaysFalse()) return std::make_shared(MergeTreeDataSelectAnalysisResult{.result = std::move(result)}); size_t total_marks_pk = 0; @@ -1291,12 +1422,13 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( try { MergeTreeDataSelectExecutor::filterPartsByPartition( + indexes->partition_pruner, + indexes->minmax_idx_condition, parts, alter_conversions, part_values, metadata_snapshot_base, data, - query_info, context, max_block_numbers_to_read.get(), log, @@ -1306,7 +1438,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( query_info, metadata_snapshot->getColumns().getAllPhysical(), parts, - *key_condition, + indexes->key_condition, data, metadata_snapshot, context, @@ -1322,24 +1454,18 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( auto reader_settings = getMergeTreeReaderSettings(context, query_info); - bool use_skip_indexes = settings.use_skip_indexes; - bool final = InterpreterSelectQuery::isQueryWithFinal(query_info); - - if (final && !settings.use_skip_indexes_if_final) - use_skip_indexes = false; - result.parts_with_ranges = MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipIndexes( std::move(parts), std::move(alter_conversions), metadata_snapshot, - query_info, context, - *key_condition, + 
indexes->key_condition, + indexes->skip_indexes, reader_settings, log, num_streams, result.index_stats, - use_skip_indexes); + indexes->use_skip_indexes); } catch (...) { @@ -1387,7 +1513,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// Disable read-in-order optimization for reverse order with final. /// Otherwise, it can lead to incorrect final behavior because the implementation may rely on the reading in direct order). - if (direction != 1 && isQueryWithFinal()) + if (direction != 1 && query_info.isFinal()) return false; auto order_info = std::make_shared(SortDescription{}, prefix_size, direction, limit); @@ -1405,7 +1531,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// update sort info for output stream SortDescription sort_description; - const Names & sorting_key_columns = storage_snapshot->getMetadataForQuery()->getSortingKeyColumns(); + const Names & sorting_key_columns = metadata_for_reading->getSortingKeyColumns(); const Block & header = output_stream->header; const int sort_direction = getSortDirection(); for (const auto & column_name : sorting_key_columns) @@ -1515,7 +1641,7 @@ ReadFromMergeTree::AnalysisResult ReadFromMergeTree::getAnalysisResult() const bool ReadFromMergeTree::isQueryWithFinal() const { - return InterpreterSelectQuery::isQueryWithFinal(query_info); + return query_info.isFinal(); } bool ReadFromMergeTree::isQueryWithSampling() const @@ -1736,6 +1862,7 @@ void ReadFromMergeTree::initializePipeline(QueryPipelineBuilder & pipeline, cons processors.emplace_back(processor); pipeline.init(std::move(pipe)); + pipeline.addContext(context); // Attach QueryIdHolder if needed if (query_id_holder) pipeline.setQueryIdHolder(std::move(query_id_holder)); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 99cbe9d9e50..06021cb1c5b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -6,6 +6,7 @@ #include #include #include +#include namespace DB { @@ -25,6 +26,35 @@ struct MergeTreeDataSelectSamplingData ActionsDAGPtr filter_expression; }; +struct UsefulSkipIndexes +{ + struct DataSkippingIndexAndCondition + { + MergeTreeIndexPtr index; + MergeTreeIndexConditionPtr condition; + + DataSkippingIndexAndCondition(MergeTreeIndexPtr index_, MergeTreeIndexConditionPtr condition_) + : index(index_), condition(condition_) + { + } + }; + + struct MergedDataSkippingIndexAndCondition + { + std::vector indices; + MergeTreeIndexMergedConditionPtr condition; + + void addIndex(const MergeTreeIndexPtr & index) + { + indices.push_back(index); + condition->addIndex(indices.back()); + } + }; + + std::vector useful_indices; + std::vector merged_indices; +}; + struct MergeTreeDataSelectAnalysisResult; using MergeTreeDataSelectAnalysisResultPtr = std::shared_ptr; @@ -134,6 +164,15 @@ public: UInt64 getSelectedRows() const { return selected_rows; } UInt64 getSelectedMarks() const { return selected_marks; } + struct Indexes + { + KeyCondition key_condition; + std::optional partition_pruner; + std::optional minmax_idx_condition; + UsefulSkipIndexes skip_indexes; + bool use_skip_indexes; + }; + static MergeTreeDataSelectAnalysisResultPtr selectRangesToRead( MergeTreeData::DataPartsVector parts, std::vector alter_conversions, @@ -148,7 +187,8 @@ public: const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log); + Poco::Logger * log, + std::optional & 
indexes); MergeTreeDataSelectAnalysisResultPtr selectRangesToRead( MergeTreeData::DataPartsVector parts, @@ -164,7 +204,6 @@ public: bool readsInOrder() const; void updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info_value); - bool isQueryWithFinal() const; bool isQueryWithSampling() const; @@ -187,6 +226,8 @@ public: size_t getNumStreams() const { return requested_num_streams; } bool isParallelReadingEnabled() const { return read_task_callback != std::nullopt; } + void onAddFilterFinish() override; + private: static MergeTreeDataSelectAnalysisResultPtr selectRangesToReadImpl( MergeTreeData::DataPartsVector parts, @@ -200,7 +241,8 @@ private: const MergeTreeData & data, const Names & real_column_names, bool sample_factor_column_queried, - Poco::Logger * log); + Poco::Logger * log, + std::optional & indexes); int getSortDirection() const { @@ -241,6 +283,9 @@ private: std::shared_ptr max_block_numbers_to_read; + /// Pre-computed value, needed to trigger sets creating for PK + mutable std::optional indexes; + Poco::Logger * log; UInt64 selected_parts = 0; UInt64 selected_rows = 0; diff --git a/src/Processors/QueryPlan/SourceStepWithFilter.h b/src/Processors/QueryPlan/SourceStepWithFilter.h index a363451fff2..34b6e3c6a7b 100644 --- a/src/Processors/QueryPlan/SourceStepWithFilter.h +++ b/src/Processors/QueryPlan/SourceStepWithFilter.h @@ -37,6 +37,8 @@ public: filter_dags.push_back(std::move(filter_dag)); } + virtual void onAddFilterFinish() {} + protected: std::vector filter_dags; ActionDAGNodes filter_nodes; diff --git a/src/Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.cpp b/src/Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.cpp index 4278eb8e8b2..59c4b9a6a87 100644 --- a/src/Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.cpp +++ b/src/Processors/Transforms/CreateSetAndFilterOnTheFlyTransform.cpp @@ -106,7 +106,7 @@ void CreatingSetsOnTheFlyTransform::transform(Chunk & chunk) if (chunk.getNumRows()) { Columns key_columns = getColumnsByIndices(chunk, key_column_indices); - bool limit_exceeded = !set->insertFromBlock(key_columns); + bool limit_exceeded = !set->insertFromColumns(key_columns); if (limit_exceeded) { auto prev_state = set->state.exchange(SetWithState::State::Suspended); diff --git a/src/Processors/Transforms/CreatingSetsTransform.cpp b/src/Processors/Transforms/CreatingSetsTransform.cpp index c6ac8bea5ba..afce1355f7a 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.cpp +++ b/src/Processors/Transforms/CreatingSetsTransform.cpp @@ -25,13 +25,15 @@ CreatingSetsTransform::~CreatingSetsTransform() = default; CreatingSetsTransform::CreatingSetsTransform( Block in_header_, Block out_header_, - SubqueryForSet subquery_for_set_, + SetAndKeyPtr set_and_key_, + StoragePtr external_table_, SizeLimits network_transfer_limits_, - ContextPtr context_) + PreparedSetsCachePtr prepared_sets_cache_) : IAccumulatingTransform(std::move(in_header_), std::move(out_header_)) - , WithContext(context_) - , subquery(std::move(subquery_for_set_)) + , set_and_key(std::move(set_and_key_)) + , external_table(std::move(external_table_)) , network_transfer_limits(std::move(network_transfer_limits_)) + , prepared_sets_cache(std::move(prepared_sets_cache_)) { } @@ -52,31 +54,30 @@ void CreatingSetsTransform::work() void CreatingSetsTransform::startSubquery() { /// Lookup the set in the cache if we don't need to build table. 
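The rewritten lookup that follows injects a PreparedSetsCache instead of reaching through the query context. The cache contract is that findOrPromiseToBuild returns either a promise (this caller must build the set and fulfil it) or a shared future (another thread is already building it and this caller waits), which is what the from_cache.index() == 0 check in the transform distinguishes. A minimal standalone sketch of that contract, under hypothetical names (SetCache, Set):

    #include <future>
    #include <iostream>
    #include <mutex>
    #include <string>
    #include <unordered_map>
    #include <utility>
    #include <variant>

    using Set = std::string; // stand-in for SetPtr

    class SetCache
    {
    public:
        // First caller for a key gets a promise and must build the set;
        // later callers get a shared_future and wait for the first one.
        std::variant<std::promise<Set>, std::shared_future<Set>>
        findOrPromiseToBuild(const std::string & key)
        {
            std::lock_guard lock(mutex);
            if (auto it = futures.find(key); it != futures.end())
                return it->second;      // already being built elsewhere
            std::promise<Set> promise;
            futures.emplace(key, promise.get_future().share());
            return std::move(promise);  // caller builds and fulfils it
        }

    private:
        std::mutex mutex;
        std::unordered_map<std::string, std::shared_future<Set>> futures;
    };

    int main()
    {
        SetCache cache;
        auto first = cache.findOrPromiseToBuild("subquery_hash");
        auto second = cache.findOrPromiseToBuild("subquery_hash");

        // index() == 0 means "build it yourself", cf. from_cache.index() == 0.
        std::cout << first.index() << ' ' << second.index() << '\n'; // prints: 0 1

        std::get<0>(first).set_value("built set");
        std::cout << std::get<1>(second).get() << '\n';              // prints: built set
    }

The retry loop in the real transform additionally handles the case where the first builder fails and fulfils the future with a null set, in which case waiters call findOrPromiseToBuild again.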
- auto ctx = context.lock(); - if (ctx && ctx->getPreparedSetsCache() && !subquery.table) + if (prepared_sets_cache && !external_table) { /// Try to find the set in the cache and wait for it to be built. /// Retry if the set from cache fails to be built. while (true) { - auto from_cache = ctx->getPreparedSetsCache()->findOrPromiseToBuild(subquery.key); + auto from_cache = prepared_sets_cache->findOrPromiseToBuild(set_and_key->key); if (from_cache.index() == 0) { + LOG_TRACE(log, "Building set, key: {}", set_and_key->key); promise_to_build = std::move(std::get<0>(from_cache)); } else { - LOG_TRACE(log, "Waiting for set to be build by another thread, key: {}", subquery.key); + LOG_TRACE(log, "Waiting for set to be built by another thread, key: {}", set_and_key->key); SharedSet set_built_by_another_thread = std::move(std::get<1>(from_cache)); const SetPtr & ready_set = set_built_by_another_thread.get(); if (!ready_set) { - LOG_TRACE(log, "Failed to use set from cache, key: {}", subquery.key); + LOG_TRACE(log, "Failed to use set from cache, key: {}", set_and_key->key); continue; } - subquery.promise_to_fill_set.set_value(ready_set); - subquery.set_in_progress.reset(); + set_and_key->set = ready_set; done_with_set = true; set_from_cache = true; } @@ -84,19 +85,19 @@ void CreatingSetsTransform::startSubquery() } } - if (subquery.set_in_progress) - LOG_TRACE(log, "Creating set, key: {}", subquery.key); - if (subquery.table) + if (set_and_key->set && !set_from_cache) + LOG_TRACE(log, "Creating set, key: {}", set_and_key->key); + if (external_table) LOG_TRACE(log, "Filling temporary table."); - if (subquery.table) + if (external_table) /// TODO: make via port - table_out = QueryPipeline(subquery.table->write({}, subquery.table->getInMemoryMetadataPtr(), getContext(), /*async_insert=*/false)); + table_out = QueryPipeline(external_table->write({}, external_table->getInMemoryMetadataPtr(), nullptr, /*async_insert=*/false)); - done_with_set = !subquery.set_in_progress; - done_with_table = !subquery.table; + done_with_set = !set_and_key->set || set_from_cache; + done_with_table = !external_table; - if ((done_with_set && !set_from_cache) /*&& done_with_join*/ && done_with_table) + if ((done_with_set && !set_from_cache) && done_with_table) throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error: nothing to do with subquery"); if (table_out.initialized()) @@ -116,9 +117,9 @@ void CreatingSetsTransform::finishSubquery() } else if (read_rows != 0) { - if (subquery.set_in_progress) - LOG_DEBUG(log, "Created Set with {} entries from {} rows in {} sec.", subquery.set_in_progress->getTotalRowCount(), read_rows, seconds); - if (subquery.table) + if (set_and_key->set) + LOG_DEBUG(log, "Created Set with {} entries from {} rows in {} sec.", set_and_key->set->getTotalRowCount(), read_rows, seconds); + if (external_table) LOG_DEBUG(log, "Created Table with {} rows in {} sec.", read_rows, seconds); } else @@ -131,11 +132,6 @@ void CreatingSetsTransform::init() { is_initialized = true; - if (subquery.set_in_progress) - { - subquery.set_in_progress->setHeader(getInputPort().getHeader().getColumnsWithTypeAndName()); - } - watch.restart(); startSubquery(); } @@ -147,7 +143,7 @@ void CreatingSetsTransform::consume(Chunk chunk) if (!done_with_set) { - if (!subquery.set_in_progress->insertFromBlock(block.getColumnsWithTypeAndName())) + if (!set_and_key->set->insertFromBlock(block.getColumnsWithTypeAndName())) done_with_set = true; } @@ -170,12 +166,11 @@ void CreatingSetsTransform::consume(Chunk chunk) Chunk

CreatingSetsTransform::generate() { - if (subquery.set_in_progress) + if (set_and_key->set && !set_from_cache) { - subquery.set_in_progress->finishInsert(); - subquery.promise_to_fill_set.set_value(subquery.set_in_progress); + set_and_key->set->finishInsert(); if (promise_to_build) - promise_to_build->set_value(subquery.set_in_progress); + promise_to_build->set_value(set_and_key->set); } if (table_out.initialized()) diff --git a/src/Processors/Transforms/CreatingSetsTransform.h b/src/Processors/Transforms/CreatingSetsTransform.h index 26bbc45933d..d1ec7dcbca7 100644 --- a/src/Processors/Transforms/CreatingSetsTransform.h +++ b/src/Processors/Transforms/CreatingSetsTransform.h @@ -23,15 +23,16 @@ class PushingPipelineExecutor; /// Don't return any data. Sets are created when Finish status is returned. /// In general, several work() methods need to be called to finish. /// Independent processors is created for each subquery. -class CreatingSetsTransform : public IAccumulatingTransform, WithContext +class CreatingSetsTransform : public IAccumulatingTransform { public: CreatingSetsTransform( Block in_header_, Block out_header_, - SubqueryForSet subquery_for_set_, + SetAndKeyPtr set_and_key_, + StoragePtr external_table_, SizeLimits network_transfer_limits_, - ContextPtr context_); + PreparedSetsCachePtr prepared_sets_cache_); ~CreatingSetsTransform() override; @@ -42,7 +43,8 @@ public: Chunk generate() override; private: - SubqueryForSet subquery; + SetAndKeyPtr set_and_key; + StoragePtr external_table; std::optional> promise_to_build; QueryPipeline table_out; @@ -55,6 +57,7 @@ private: bool done_with_table = true; SizeLimits network_transfer_limits; + PreparedSetsCachePtr prepared_sets_cache; size_t rows_to_transfer = 0; size_t bytes_to_transfer = 0; diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index a4edf107b2f..dedf85e409c 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -569,16 +569,22 @@ std::unique_ptr QueryPipelineBuilder::joinPipelinesRightLe return left; } -void QueryPipelineBuilder::addCreatingSetsTransform(const Block & res_header, SubqueryForSet subquery_for_set, const SizeLimits & limits, ContextPtr context) +void QueryPipelineBuilder::addCreatingSetsTransform( + const Block & res_header, + SetAndKeyPtr set_and_key, + StoragePtr external_table, + const SizeLimits & limits, + PreparedSetsCachePtr prepared_sets_cache) { resize(1); auto transform = std::make_shared( getHeader(), res_header, - std::move(subquery_for_set), + std::move(set_and_key), + std::move(external_table), limits, - context); + std::move(prepared_sets_cache)); InputPort * totals_port = nullptr; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 3a5d65d4388..e744e3612ce 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -33,6 +33,12 @@ class TableJoin; class QueryPipelineBuilder; using QueryPipelineBuilderPtr = std::unique_ptr; +struct SetAndKey; +using SetAndKeyPtr = std::shared_ptr; + +class PreparedSetsCache; +using PreparedSetsCachePtr = std::shared_ptr; + class QueryPipelineBuilder { public: @@ -138,7 +144,12 @@ public: /// This is used for CreatingSets. 
void addPipelineBefore(QueryPipelineBuilder pipeline); - void addCreatingSetsTransform(const Block & res_header, SubqueryForSet subquery_for_set, const SizeLimits & limits, ContextPtr context); + void addCreatingSetsTransform( + const Block & res_header, + SetAndKeyPtr set_and_key, + StoragePtr external_table, + const SizeLimits & limits, + PreparedSetsCachePtr prepared_sets_cache); PipelineExecutorPtr execute(); @@ -174,6 +185,7 @@ public: void addResources(QueryPlanResourceHolder resources_) { resources = std::move(resources_); } void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } + void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } /// Convert query pipeline to pipe. static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); diff --git a/src/QueryPipeline/RemoteQueryExecutor.cpp b/src/QueryPipeline/RemoteQueryExecutor.cpp index 233dfe13339..cd6f65b7b43 100644 --- a/src/QueryPipeline/RemoteQueryExecutor.cpp +++ b/src/QueryPipeline/RemoteQueryExecutor.cpp @@ -434,11 +434,13 @@ RemoteQueryExecutor::ReadResult RemoteQueryExecutor::processPacket(Packet packet switch (packet.type) { case Protocol::Server::MergeTreeReadTaskRequest: - processMergeTreeReadTaskRequest(packet.request); + chassert(packet.request.has_value()); + processMergeTreeReadTaskRequest(packet.request.value()); return ReadResult(ReadResult::Type::ParallelReplicasToken); case Protocol::Server::MergeTreeAllRangesAnnounecement: - processMergeTreeInitialReadAnnounecement(packet.announcement); + chassert(packet.announcement.has_value()); + processMergeTreeInitialReadAnnounecement(packet.announcement.value()); return ReadResult(ReadResult::Type::ParallelReplicasToken); case Protocol::Server::ReadTaskRequest: diff --git a/src/Storages/KVStorageUtils.cpp b/src/Storages/KVStorageUtils.cpp index 370327e4c7e..16ab99d03b4 100644 --- a/src/Storages/KVStorageUtils.cpp +++ b/src/Storages/KVStorageUtils.cpp @@ -66,13 +66,20 @@ bool traverseASTFilter( return false; value = args.children.at(1); - PreparedSetKey set_key; - if ((value->as() || value->as())) - set_key = PreparedSetKey::forSubquery(*value); - else - set_key = PreparedSetKey::forLiteral(*value, {primary_key_type}); + PreparedSets::Hash set_key = value->getTreeHash(); + FutureSetPtr future_set; - SetPtr set = prepared_sets->get(set_key); + if ((value->as() || value->as())) + future_set = prepared_sets->findSubquery(set_key); + else + future_set = prepared_sets->findTuple(set_key, {primary_key_type}); + + if (!future_set) + return false; + + future_set->buildOrderedSetInplace(context); + + auto set = future_set->get(); if (!set) return false; diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 02ef7e6bebd..f909d854cf6 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1241,7 +1241,11 @@ bool KeyCondition::tryPrepareSetIndex( const auto right_arg = func.getArgumentAt(1); - auto prepared_set = right_arg.tryGetPreparedSet(indexes_mapping, data_types); + auto future_set = right_arg.tryGetPreparedSet(indexes_mapping, data_types); + if (!future_set) + return false; + + auto prepared_set = future_set->buildOrderedSetInplace(right_arg.getTreeContext().getQueryContext()); if (!prepared_set) return false; @@ -1254,7 +1258,6 @@ bool KeyCondition::tryPrepareSetIndex( prepared_set->checkTypesEqual(indexes_mapping[i].tuple_index, 
data_types[i]); out.set_index = std::make_shared(prepared_set->getSetElements(), std::move(indexes_mapping)); - return true; } diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index c5871f7d93d..ee515106591 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -797,12 +797,13 @@ std::optional> MergeTreeDataSelectExecutor::filterPar } void MergeTreeDataSelectExecutor::filterPartsByPartition( + std::optional & partition_pruner, + std::optional & minmax_idx_condition, MergeTreeData::DataPartsVector & parts, std::vector & alter_conversions, const std::optional> & part_values, const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, - const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, Poco::Logger * log, @@ -811,27 +812,16 @@ void MergeTreeDataSelectExecutor::filterPartsByPartition( chassert(alter_conversions.empty() || parts.size() == alter_conversions.size()); const Settings & settings = context->getSettingsRef(); - - std::optional partition_pruner; - std::optional minmax_idx_condition; DataTypes minmax_columns_types; if (metadata_snapshot->hasPartitionKey()) { const auto & partition_key = metadata_snapshot->getPartitionKey(); - auto minmax_columns_names = data.getMinMaxColumnsNames(partition_key); - auto minmax_expression_actions = data.getMinMaxExpr(partition_key, ExpressionActionsSettings::fromContext(context)); minmax_columns_types = data.getMinMaxColumnsTypes(partition_key); - if (context->getSettingsRef().allow_experimental_analyzer) - minmax_idx_condition.emplace(query_info.filter_actions_dag, context, minmax_columns_names, minmax_expression_actions, NameSet()); - else - minmax_idx_condition.emplace(query_info, context, minmax_columns_names, minmax_expression_actions); - - partition_pruner.emplace(metadata_snapshot, query_info, context, false /* strict */); - if (settings.force_index_by_date && (minmax_idx_condition->alwaysUnknownOrTrue() && partition_pruner->isUseless())) { + auto minmax_columns_names = data.getMinMaxColumnsNames(partition_key); throw Exception(ErrorCodes::INDEX_NOT_USED, "Neither MinMax index by columns ({}) nor partition expr is used and setting 'force_index_by_date' is set", fmt::join(minmax_columns_names, ", ")); @@ -897,9 +887,9 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd MergeTreeData::DataPartsVector && parts, std::vector && alter_conversions, StorageMetadataPtr metadata_snapshot, - const SelectQueryInfo & query_info, const ContextPtr & context, const KeyCondition & key_condition, + const UsefulSkipIndexes & skip_indexes, const MergeTreeReaderSettings & reader_settings, Poco::Logger * log, size_t num_streams, @@ -912,93 +902,6 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd parts_with_ranges.resize(parts.size()); const Settings & settings = context->getSettingsRef(); - /// Let's start analyzing all useful indices - - struct IndexStat - { - std::atomic total_granules{0}; - std::atomic granules_dropped{0}; - std::atomic total_parts{0}; - std::atomic parts_dropped{0}; - }; - - struct DataSkippingIndexAndCondition - { - MergeTreeIndexPtr index; - MergeTreeIndexConditionPtr condition; - IndexStat stat; - - DataSkippingIndexAndCondition(MergeTreeIndexPtr index_, MergeTreeIndexConditionPtr condition_) - : index(index_), condition(condition_) - { - } - }; - 
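This hunk starts removing the per-condition statistics structs from filterPartsByPrimaryKeyAndSkipIndexes (the removal continues below); in the new layout the prepared skip indexes live in the shared, read-only UsefulSkipIndexes, while per-query counters sit in parallel vectors of atomics (useful_indices_stat, merged_indices_stat) indexed by position. A minimal standalone sketch of that parallel-stats pattern, with hypothetical names:

    #include <atomic>
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <thread>
    #include <vector>

    // The prepared (shared, read-only) index descriptions...
    struct SkipIndex { std::string name; };

    // ...and the per-execution statistics, kept in a parallel vector so the
    // shared descriptions never have to be mutated (cf. useful_indices_stat).
    struct IndexStat
    {
        std::atomic<size_t> total_granules{0};
        std::atomic<size_t> granules_dropped{0};
    };

    int main()
    {
        const std::vector<SkipIndex> indices{{"minmax_a"}, {"bloom_b"}};
        std::vector<IndexStat> stats(indices.size()); // parallel to `indices`

        // Several workers account against the same counters, as the per-part
        // processing threads do in filterPartsByPrimaryKeyAndSkipIndexes.
        std::vector<std::thread> workers;
        for (int t = 0; t < 4; ++t)
            workers.emplace_back([&]
            {
                for (size_t idx = 0; idx < indices.size(); ++idx)
                {
                    stats[idx].total_granules.fetch_add(100, std::memory_order_relaxed);
                    stats[idx].granules_dropped.fetch_add(25, std::memory_order_relaxed);
                }
            });
        for (auto & w : workers)
            w.join();

        for (size_t idx = 0; idx < indices.size(); ++idx)
            std::cout << indices[idx].name << " dropped "
                      << stats[idx].granules_dropped << '/'
                      << stats[idx].total_granules << " granules\n";
    }

Separating the immutable descriptions from the mutable counters is what allows the indexes to be built once (in buildIndexes) and reused by later analysis without re-parsing.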
- struct MergedDataSkippingIndexAndCondition - { - std::vector indices; - MergeTreeIndexMergedConditionPtr condition; - IndexStat stat; - - void addIndex(const MergeTreeIndexPtr & index) - { - indices.push_back(index); - condition->addIndex(indices.back()); - } - }; - - std::list useful_indices; - std::map, MergedDataSkippingIndexAndCondition> merged_indices; - std::unordered_set ignored_index_names; - - if (use_skip_indexes && settings.ignore_data_skipping_indices.changed) - { - const auto & indices = settings.ignore_data_skipping_indices.toString(); - Tokens tokens(indices.data(), indices.data() + indices.size(), settings.max_query_size); - IParser::Pos pos(tokens, static_cast(settings.max_parser_depth)); - Expected expected; - - /// Use an unordered list rather than string vector - auto parse_single_id_or_literal = [&] - { - String str; - if (!parseIdentifierOrStringLiteral(pos, expected, str)) - return false; - - ignored_index_names.insert(std::move(str)); - return true; - }; - - if (!ParserList::parseUtil(pos, expected, parse_single_id_or_literal, false)) - throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "Cannot parse ignore_data_skipping_indices ('{}')", indices); - } - - if (use_skip_indexes) - { - for (const auto & index : metadata_snapshot->getSecondaryIndices()) - { - - auto index_helper = MergeTreeIndexFactory::instance().get(index); - if (!ignored_index_names.contains(index.name)) - { - if (index_helper->isMergeable()) - { - auto [it, inserted] = merged_indices.try_emplace({index_helper->index.type, index_helper->getGranularity()}); - if (inserted) - it->second.condition = index_helper->createIndexMergedCondition(query_info, metadata_snapshot); - - it->second.addIndex(index_helper); - } - else - { - auto condition = index_helper->createIndexCondition(query_info, context); - if (!condition->alwaysUnknownOrTrue()) - useful_indices.emplace_back(index_helper, condition); - } - } - } - } - if (use_skip_indexes && settings.force_data_skipping_indices.changed) { const auto & indices = settings.force_data_skipping_indices.toString(); @@ -1016,7 +919,7 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd throw Exception(ErrorCodes::CANNOT_PARSE_TEXT, "No indices parsed from force_data_skipping_indices ('{}')", indices); std::unordered_set useful_indices_names; - for (const auto & useful_index : useful_indices) + for (const auto & useful_index : skip_indexes.useful_indices) useful_indices_names.insert(useful_index.index->index.name); for (const auto & index_name : forced_indices) @@ -1031,6 +934,17 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd } } + struct IndexStat + { + std::atomic total_granules{0}; + std::atomic granules_dropped{0}; + std::atomic total_parts{0}; + std::atomic parts_dropped{0}; + }; + + std::vector useful_indices_stat(skip_indexes.useful_indices.size()); + std::vector merged_indices_stat(skip_indexes.merged_indices.size()); + std::atomic sum_marks_pk = 0; std::atomic sum_parts_pk = 0; @@ -1059,13 +973,15 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd if (!ranges.ranges.empty()) sum_parts_pk.fetch_add(1, std::memory_order_relaxed); - for (auto & index_and_condition : useful_indices) + for (size_t idx = 0; idx < skip_indexes.useful_indices.size(); ++idx) { if (ranges.ranges.empty()) break; - index_and_condition.stat.total_parts.fetch_add(1, std::memory_order_relaxed); - index_and_condition.stat.total_granules.fetch_add(ranges.ranges.getNumberOfMarks(), 
std::memory_order_relaxed); + const auto & index_and_condition = skip_indexes.useful_indices[idx]; + auto & stat = useful_indices_stat[idx]; + stat.total_parts.fetch_add(1, std::memory_order_relaxed); + stat.total_granules.fetch_add(ranges.ranges.getNumberOfMarks(), std::memory_order_relaxed); size_t granules_dropped = 0; ranges.ranges = filterMarksUsingIndex( @@ -1080,17 +996,19 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd uncompressed_cache.get(), log); - index_and_condition.stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); + stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); if (ranges.ranges.empty()) - index_and_condition.stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); + stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); } - for (auto & [_, indices_and_condition] : merged_indices) + for (size_t idx = 0; idx < skip_indexes.merged_indices.size(); ++idx) { if (ranges.ranges.empty()) break; - indices_and_condition.stat.total_parts.fetch_add(1, std::memory_order_relaxed); + const auto & indices_and_condition = skip_indexes.merged_indices[idx]; + auto & stat = merged_indices_stat[idx]; + stat.total_parts.fetch_add(1, std::memory_order_relaxed); size_t total_granules = 0; size_t granules_dropped = 0; @@ -1101,11 +1019,11 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd total_granules, granules_dropped, mark_cache.get(), uncompressed_cache.get(), log); - indices_and_condition.stat.total_granules.fetch_add(total_granules, std::memory_order_relaxed); - indices_and_condition.stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); + stat.total_granules.fetch_add(total_granules, std::memory_order_relaxed); + stat.granules_dropped.fetch_add(granules_dropped, std::memory_order_relaxed); if (ranges.ranges.empty()) - indices_and_condition.stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); + stat.parts_dropped.fetch_add(1, std::memory_order_relaxed); } if (!ranges.ranges.empty()) @@ -1172,15 +1090,17 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd .num_granules_after = sum_marks_pk.load(std::memory_order_relaxed)}); } - for (const auto & index_and_condition : useful_indices) + for (size_t idx = 0; idx < skip_indexes.useful_indices.size(); ++idx) { + const auto & index_and_condition = skip_indexes.useful_indices[idx]; + const auto & stat = useful_indices_stat[idx]; const auto & index_name = index_and_condition.index->index.name; LOG_DEBUG( log, "Index {} has dropped {}/{} granules.", backQuote(index_name), - index_and_condition.stat.granules_dropped, - index_and_condition.stat.total_granules); + stat.granules_dropped, + stat.total_granules); std::string description = index_and_condition.index->index.type + " GRANULARITY " + std::to_string(index_and_condition.index->index.granularity); @@ -1189,25 +1109,27 @@ RangesInDataParts MergeTreeDataSelectExecutor::filterPartsByPrimaryKeyAndSkipInd .type = ReadFromMergeTree::IndexType::Skip, .name = index_name, .description = std::move(description), - .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped, - .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped}); + .num_parts_after = stat.total_parts - stat.parts_dropped, + .num_granules_after = stat.total_granules - stat.granules_dropped}); } - for (const auto & [type_with_granularity, index_and_condition] : 
merged_indices) + for (size_t idx = 0; idx < skip_indexes.merged_indices.size(); ++idx) { + const auto & index_and_condition = skip_indexes.merged_indices[idx]; + const auto & stat = merged_indices_stat[idx]; const auto & index_name = "Merged"; LOG_DEBUG(log, "Index {} has dropped {}/{} granules.", backQuote(index_name), - index_and_condition.stat.granules_dropped, index_and_condition.stat.total_granules); + stat.granules_dropped, stat.total_granules); - std::string description = "MERGED GRANULARITY " + std::to_string(type_with_granularity.second); + std::string description = "MERGED GRANULARITY " + std::to_string(index_and_condition.indices.at(0)->index.granularity); index_stats.emplace_back(ReadFromMergeTree::IndexStat{ .type = ReadFromMergeTree::IndexType::Skip, .name = index_name, .description = std::move(description), - .num_parts_after = index_and_condition.stat.total_parts - index_and_condition.stat.parts_dropped, - .num_granules_after = index_and_condition.stat.total_granules - index_and_condition.stat.granules_dropped}); + .num_parts_after = stat.total_parts - stat.parts_dropped, + .num_granules_after = stat.total_granules - stat.granules_dropped}); } return parts_with_ranges; @@ -1329,6 +1251,7 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar selectColumnNames(column_names_to_return, data, real_column_names, virt_column_names, sample_factor_column_queried); + std::optional indexes; return ReadFromMergeTree::selectRangesToRead( std::move(parts), /*alter_conversions=*/ {}, @@ -1343,7 +1266,8 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar data, real_column_names, sample_factor_column_queried, - log); + log, + indexes); } QueryPlanStepPtr MergeTreeDataSelectExecutor::readFromParts( diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 18fe312598c..a5dcbfe6650 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -175,12 +175,13 @@ public: /// Filter parts using minmax index and partition key. 
static void filterPartsByPartition( + std::optional & partition_pruner, + std::optional & minmax_idx_condition, MergeTreeData::DataPartsVector & parts, std::vector & alter_conversions, const std::optional> & part_values, const StorageMetadataPtr & metadata_snapshot, const MergeTreeData & data, - const SelectQueryInfo & query_info, const ContextPtr & context, const PartitionIdToMaxBlock * max_block_numbers_to_read, Poco::Logger * log, @@ -193,9 +194,9 @@ public: MergeTreeData::DataPartsVector && parts, std::vector && alter_conversions, StorageMetadataPtr metadata_snapshot, - const SelectQueryInfo & query_info, const ContextPtr & context, const KeyCondition & key_condition, + const UsefulSkipIndexes & skip_indexes, const MergeTreeReaderSettings & reader_settings, Poco::Logger * log, size_t num_streams, diff --git a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp index 235d90bb974..398a85e92ac 100644 --- a/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexConditionBloomFilter.cpp @@ -36,6 +36,7 @@ ColumnWithTypeAndName getPreparedSetInfo(const ConstSetPtr & prepared_set) Columns set_elements; for (auto & set_element : prepared_set->getSetElements()) + set_elements.emplace_back(set_element->convertToFullColumnIfConst()); return {ColumnTuple::create(set_elements), std::make_shared(prepared_set->getElementsTypes()), "dummy"}; @@ -310,13 +311,17 @@ bool MergeTreeIndexConditionBloomFilter::traverseFunction(const RPNBuilderTreeNo if (functionIsInOrGlobalInOperator(function_name)) { - ConstSetPtr prepared_set = rhs_argument.tryGetPreparedSet(); - - if (prepared_set && prepared_set->hasExplicitSetElements()) + if (auto future_set = rhs_argument.tryGetPreparedSet(); future_set) { - const auto prepared_info = getPreparedSetInfo(prepared_set); - if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out)) - maybe_useful = true; + if (auto prepared_set = future_set->buildOrderedSetInplace(rhs_argument.getTreeContext().getQueryContext()); prepared_set) + { + if (prepared_set->hasExplicitSetElements()) + { + const auto prepared_info = getPreparedSetInfo(prepared_set); + if (traverseTreeIn(function_name, lhs_argument, prepared_set, prepared_info.type, prepared_info.column, out)) + maybe_useful = true; + } + } } } else if (function_name == "equals" || diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index b15bf4d6811..b6a2cafe245 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -624,7 +624,11 @@ bool MergeTreeConditionFullText::tryPrepareSetBloomFilter( if (key_tuple_mapping.empty()) return false; - auto prepared_set = right_argument.tryGetPreparedSet(data_types); + auto future_set = right_argument.tryGetPreparedSet(data_types); + if (!future_set) + return false; + + auto prepared_set = future_set->buildOrderedSetInplace(right_argument.getTreeContext().getQueryContext()); if (!prepared_set || !prepared_set->hasExplicitSetElements()) return false; diff --git a/src/Storages/MergeTree/MergeTreeIndexInverted.cpp b/src/Storages/MergeTree/MergeTreeIndexInverted.cpp index e19187646cd..6b4919c545d 100644 --- a/src/Storages/MergeTree/MergeTreeIndexInverted.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexInverted.cpp @@ -655,7 +655,11 @@ bool MergeTreeConditionInverted::tryPrepareSetGinFilter( if 
(key_tuple_mapping.empty()) return false; - ConstSetPtr prepared_set = rhs.tryGetPreparedSet(); + auto future_set = rhs.tryGetPreparedSet(); + if (!future_set) + return false; + + auto prepared_set = future_set->buildOrderedSetInplace(rhs.getTreeContext().getQueryContext()); if (!prepared_set || !prepared_set->hasExplicitSetElements()) return false; diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index 120b3e43472..9c34a149128 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -554,7 +554,10 @@ void MergeTreeIndexConditionSet::traverseAST(ASTPtr & node) const if (atomFromAST(node)) { if (node->as() || node->as()) - node = makeASTFunction("__bitWrapperFunc", node); + /// __bitWrapperFunc* uses default implementation for Nullable types + /// Here we additionally convert Null to 0, + /// otherwise condition 'something OR NULL' will always return Null and filter everything. + node = makeASTFunction("__bitWrapperFunc", makeASTFunction("ifNull", node, std::make_shared(Field(0)))); } else node = std::make_shared(UNKNOWN_FIELD); diff --git a/src/Storages/MergeTree/MergeTreeReadPool.cpp b/src/Storages/MergeTree/MergeTreeReadPool.cpp index ba8c2c6385f..2ab90189f9d 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.cpp +++ b/src/Storages/MergeTree/MergeTreeReadPool.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace ProfileEvents @@ -433,8 +434,12 @@ MergeTreeReadTaskPtr MergeTreeReadPoolParallelReplicas::getTask(size_t thread) if (buffered_ranges.empty()) { - auto result = extension.callback(ParallelReadRequest{ - .replica_num = extension.number_of_current_replica, .min_number_of_marks = min_marks_for_concurrent_read * threads}); + auto result = extension.callback(ParallelReadRequest( + CoordinationMode::Default, + extension.number_of_current_replica, + min_marks_for_concurrent_read * threads, + /// For Default coordination mode we don't need to pass part names. 
+ RangesInDataPartsDescription{})); if (!result || result->finish) { @@ -529,12 +534,12 @@ MarkRanges MergeTreeInOrderReadPoolParallelReplicas::getNewTask(RangesInDataPart if (no_more_tasks) return {}; - auto response = extension.callback(ParallelReadRequest{ - .mode = mode, - .replica_num = extension.number_of_current_replica, - .min_number_of_marks = min_marks_for_concurrent_read * request.size(), - .description = request, - }); + auto response = extension.callback(ParallelReadRequest( + mode, + extension.number_of_current_replica, + min_marks_for_concurrent_read * request.size(), + request + )); if (!response || response->description.empty() || response->finish) { diff --git a/src/Storages/MergeTree/MergeTreeReadPool.h b/src/Storages/MergeTree/MergeTreeReadPool.h index 21273904e00..68d5438cb3d 100644 --- a/src/Storages/MergeTree/MergeTreeReadPool.h +++ b/src/Storages/MergeTree/MergeTreeReadPool.h @@ -193,10 +193,11 @@ public: predict_block_size_bytes, column_names, virtual_column_names, prewhere_info, actions_settings, reader_settings, per_part_params); - extension.all_callback({ - .description = parts_ranges.getDescriptions(), - .replica_num = extension.number_of_current_replica - }); + extension.all_callback(InitialAllRangesAnnouncement( + CoordinationMode::Default, + parts_ranges.getDescriptions(), + extension.number_of_current_replica + )); } ~MergeTreeReadPoolParallelReplicas() override; @@ -253,10 +254,11 @@ public: for (const auto & part : parts_ranges) buffered_tasks.push_back({part.data_part->info, MarkRanges{}}); - extension.all_callback({ - .description = parts_ranges.getDescriptions(), - .replica_num = extension.number_of_current_replica - }); + extension.all_callback(InitialAllRangesAnnouncement( + mode, + parts_ranges.getDescriptions(), + extension.number_of_current_replica + )); } MarkRanges getNewTask(RangesInDataPartDescription description); diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index e27354f9d16..5a6d59bf0be 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp index 2814d13cff0..57cd91cc995 100644 --- a/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp +++ b/src/Storages/MergeTree/ParallelReplicasReadingCoordinator.cpp @@ -102,7 +102,6 @@ public: explicit DefaultCoordinator(size_t replicas_count_) : ParallelReplicasReadingCoordinator::ImplInterface(replicas_count_) - , announcements(replicas_count_) , reading_state(replicas_count_) { } @@ -119,7 +118,6 @@ public: PartitionToBlockRanges partitions; size_t sent_initial_requests{0}; - std::vector announcements; Parts all_parts_to_read; /// Contains only parts which we haven't started to read from diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index 35b2d5db3b5..97bb9f3b4d4 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -24,6 +24,13 @@ PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, const Sele { } +PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict) + : partition_key(MergeTreePartition::adjustPartitionKey(metadata, context)) + , 
partition_condition(filter_actions_dag, context, partition_key.column_names, partition_key.expression, {}, true /* single_point */, strict) + , useless(strict ? partition_condition.anyUnknownOrAlwaysTrue() : partition_condition.alwaysUnknownOrTrue()) +{ +} + bool PartitionPruner::canBePruned(const IMergeTreeDataPart & part) { if (part.isEmpty()) diff --git a/src/Storages/MergeTree/PartitionPruner.h b/src/Storages/MergeTree/PartitionPruner.h index 3a986923321..7f1b74795c4 100644 --- a/src/Storages/MergeTree/PartitionPruner.h +++ b/src/Storages/MergeTree/PartitionPruner.h @@ -14,6 +14,7 @@ class PartitionPruner { public: PartitionPruner(const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, ContextPtr context, bool strict); + PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict); bool canBePruned(const IMergeTreeDataPart & part); diff --git a/src/Storages/MergeTree/RPNBuilder.cpp b/src/Storages/MergeTree/RPNBuilder.cpp index e49459d3d17..0b42111be45 100644 --- a/src/Storages/MergeTree/RPNBuilder.cpp +++ b/src/Storages/MergeTree/RPNBuilder.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -275,7 +276,7 @@ bool RPNBuilderTreeNode::tryGetConstant(Field & output_value, DataTypePtr & outp namespace { -ConstSetPtr tryGetSetFromDAGNode(const ActionsDAG::Node * dag_node) +FutureSetPtr tryGetSetFromDAGNode(const ActionsDAG::Node * dag_node) { if (!dag_node->column) return {}; @@ -285,28 +286,26 @@ ConstSetPtr tryGetSetFromDAGNode(const ActionsDAG::Node * dag_node) column = &column_const->getDataColumn(); if (const auto * column_set = typeid_cast(column)) - { - auto set = column_set->getData(); - - if (set && set->isCreated()) - return set; - } + return column_set->getData(); return {}; } } -ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet() const +FutureSetPtr RPNBuilderTreeNode::tryGetPreparedSet() const { const auto & prepared_sets = getTreeContext().getPreparedSets(); if (ast_node && prepared_sets) { - auto prepared_sets_with_same_hash = prepared_sets->getByTreeHash(ast_node->getTreeHash()); - for (auto & set : prepared_sets_with_same_hash) - if (set.isCreated()) - return set.get(); + auto key = ast_node->getTreeHash(); + const auto & sets = prepared_sets->getSetsFromTuple(); + auto it = sets.find(key); + if (it != sets.end() && !it->second.empty()) + return it->second.at(0); + + return prepared_sets->findSubquery(key); } else if (dag_node) { @@ -317,16 +316,16 @@ ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet() const return {}; } -ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet(const DataTypes & data_types) const +FutureSetPtr RPNBuilderTreeNode::tryGetPreparedSet(const DataTypes & data_types) const { const auto & prepared_sets = getTreeContext().getPreparedSets(); if (prepared_sets && ast_node) { if (ast_node->as() || ast_node->as()) - return prepared_sets->get(PreparedSetKey::forSubquery(*ast_node)); + return prepared_sets->findSubquery(ast_node->getTreeHash()); - return prepared_sets->get(PreparedSetKey::forLiteral(*ast_node, data_types)); + return prepared_sets->findTuple(ast_node->getTreeHash(), data_types); } else if (dag_node) { @@ -337,46 +336,59 @@ ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet(const DataTypes & data_types) return nullptr; } -ConstSetPtr RPNBuilderTreeNode::tryGetPreparedSet( +FutureSetPtr RPNBuilderTreeNode::tryGetPreparedSet( const std::vector & indexes_mapping, const DataTypes & data_types) const { const auto & prepared_sets = 
getTreeContext().getPreparedSets(); + /// We have `PreparedSetKey::forLiteral` but it is useless here as we don't have enough information + /// about types in left argument of the IN operator. Instead, we manually iterate through all the sets + /// and find the one for the right arg based on the AST structure (getTreeHash), after that we check + /// that the types it was prepared with are compatible with the types of the primary key. + auto types_match = [&indexes_mapping, &data_types](const DataTypes & set_types) + { + assert(indexes_mapping.size() == data_types.size()); + + for (size_t i = 0; i < indexes_mapping.size(); ++i) + { + if (indexes_mapping[i].tuple_index >= set_types.size()) + return false; + + auto lhs = removeNullable(recursiveRemoveLowCardinality(data_types[i])); + auto rhs = removeNullable(recursiveRemoveLowCardinality(set_types[indexes_mapping[i].tuple_index])); + + if (!lhs->equals(*rhs)) + return false; + } + + return true; + }; + if (prepared_sets && ast_node) { if (ast_node->as() || ast_node->as()) - return prepared_sets->get(PreparedSetKey::forSubquery(*ast_node)); - - /// We have `PreparedSetKey::forLiteral` but it is useless here as we don't have enough information - /// about types in left argument of the IN operator. Instead, we manually iterate through all the sets - /// and find the one for the right arg based on the AST structure (getTreeHash), after that we check - /// that the types it was prepared with are compatible with the types of the primary key. - auto types_match = [&indexes_mapping, &data_types](const SetPtr & candidate_set) - { - assert(indexes_mapping.size() == data_types.size()); - - for (size_t i = 0; i < indexes_mapping.size(); ++i) - { - if (!candidate_set->areTypesEqual(indexes_mapping[i].tuple_index, data_types[i])) - return false; - } - - return true; - }; + return prepared_sets->findSubquery(ast_node->getTreeHash()); auto tree_hash = ast_node->getTreeHash(); - for (const auto & set : prepared_sets->getByTreeHash(tree_hash)) - { - if (set.isCreated() && types_match(set.get())) - return set.get(); - } + const auto & sets = prepared_sets->getSetsFromTuple(); + auto it = sets.find(tree_hash); + if (it == sets.end()) + return nullptr; + + for (const auto & future_set : it->second) + if (types_match(future_set->getTypes())) + return future_set; } else { const auto * node_without_alias = getNodeWithoutAlias(dag_node); if (node_without_alias->column) - return tryGetSetFromDAGNode(node_without_alias); + { + auto future_set = tryGetSetFromDAGNode(node_without_alias); + if (types_match(future_set->getTypes())) + return future_set; + } } return nullptr; diff --git a/src/Storages/MergeTree/RPNBuilder.h b/src/Storages/MergeTree/RPNBuilder.h index 626eb288493..6f624d93cd6 100644 --- a/src/Storages/MergeTree/RPNBuilder.h +++ b/src/Storages/MergeTree/RPNBuilder.h @@ -109,13 +109,13 @@ public: bool tryGetConstant(Field & output_value, DataTypePtr & output_type) const; /// Try get prepared set from node - ConstSetPtr tryGetPreparedSet() const; + FutureSetPtr tryGetPreparedSet() const; /// Try get prepared set from node that match data types - ConstSetPtr tryGetPreparedSet(const DataTypes & data_types) const; + FutureSetPtr tryGetPreparedSet(const DataTypes & data_types) const; /// Try get prepared set from node that match indexes mapping and data types - ConstSetPtr tryGetPreparedSet( + FutureSetPtr tryGetPreparedSet( const std::vector & indexes_mapping, const DataTypes & data_types) const; diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp 
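// A self-contained sketch (toy string-based types, not ClickHouse's IDataType) of
// the normalization rule types_match() applies above: Nullable and LowCardinality
// wrappers are stripped from both the primary-key side and the prepared-set side
// before the types are compared for equality.

#include <cassert>
#include <string>

static std::string stripWrappers(std::string name)
{
    for (bool changed = true; changed;)
    {
        changed = false;
        for (const std::string & prefix : {"LowCardinality(", "Nullable("})
        {
            if (name.rfind(prefix, 0) == 0 && !name.empty() && name.back() == ')')
            {
                name = name.substr(prefix.size(), name.size() - prefix.size() - 1);
                changed = true;
            }
        }
    }
    return name;
}

int main()
{
    // A set prepared for plain String is still usable when the key column is
    // LowCardinality(Nullable(String)): both normalize to String.
    assert(stripWrappers("LowCardinality(Nullable(String))") == stripWrappers("String"));
}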
b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 03ded2ef260..3ba3048b812 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -1530,7 +1530,7 @@ bool ReplicatedMergeTreeQueue::shouldExecuteLogEntry( Int64 ReplicatedMergeTreeQueue::getCurrentMutationVersion( - const String & partition_id, Int64 data_version, std::lock_guard & /* state_lock */) const + const String & partition_id, Int64 data_version) const { auto in_partition = mutations_by_partition.find(partition_id); if (in_partition == mutations_by_partition.end()) @@ -2108,24 +2108,19 @@ ReplicatedMergeTreeQueue::QueueLocks ReplicatedMergeTreeQueue::lockQueue() } LocalMergePredicate::LocalMergePredicate(ReplicatedMergeTreeQueue & queue_) - : queue(queue_) { + /// Use only information that can be quickly accessed locally without querying ZooKeeper + virtual_parts_ = &queue_.virtual_parts; + mutations_state_ = &queue_; + virtual_parts_mutex = &queue_.state_mutex; } -ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( - ReplicatedMergeTreeQueue & queue_, zkutil::ZooKeeperPtr & zookeeper, std::optional && partition_ids_hint_) - : nested_pred(queue_) - , queue(queue_) - , partition_ids_hint(std::move(partition_ids_hint_)) - , prev_virtual_parts(queue.format_version) -{ - { - std::lock_guard lock(queue.state_mutex); - prev_virtual_parts = queue.virtual_parts; - } - /// Load current quorum status. - auto quorum_status_future = zookeeper->asyncTryGet(fs::path(queue.zookeeper_path) / "quorum" / "status"); +template +CommittingBlocks BaseMergePredicate::getCommittingBlocks( + zkutil::ZooKeeperPtr & zookeeper, const std::string & zookeeper_path, Poco::Logger * log_) +{ + CommittingBlocks committing_blocks; /// Load current inserts /// Hint avoids listing partitions that we don't really need. @@ -2133,14 +2128,14 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( /// so without hint it can do a few thousands requests (if not using MultiRead). 
Strings partitions; if (!partition_ids_hint) - partitions = zookeeper->getChildren(fs::path(queue.zookeeper_path) / "block_numbers"); + partitions = zookeeper->getChildren(fs::path(zookeeper_path) / "block_numbers"); else std::copy(partition_ids_hint->begin(), partition_ids_hint->end(), std::back_inserter(partitions)); std::vector paths; paths.reserve(partitions.size()); for (const String & partition : partitions) - paths.push_back(fs::path(queue.zookeeper_path) / "block_numbers" / partition); + paths.push_back(fs::path(zookeeper_path) / "block_numbers" / partition); auto locks_children = zookeeper->tryGetChildren(paths); @@ -2153,22 +2148,40 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( if (response.error != Coordination::Error::ZOK) { /// Probably a wrong hint was provided (it's ok if a user passed non-existing partition to OPTIMIZE) - LOG_WARNING(queue.log, "Partition id '{}' was provided as a hint, but there's not such partition in ZooKeeper", partitions[i]); + LOG_WARNING(log_, "Partition id '{}' was provided as a hint, but there's no such partition in ZooKeeper", partitions[i]); partition_ids_hint->erase(partitions[i]); continue; } - Strings partition_block_numbers = response.names; + Strings partition_block_numbers = locks_children[i].names; for (const String & entry : partition_block_numbers) { if (!startsWith(entry, "block-")) continue; + Int64 block_number = parse(entry.substr(strlen("block-"))); - String zk_path = fs::path(queue.zookeeper_path) / "block_numbers" / partitions[i] / entry; committing_blocks[partitions[i]].insert(block_number); } } + return committing_blocks; +} + +ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( + ReplicatedMergeTreeQueue & queue_, zkutil::ZooKeeperPtr & zookeeper, std::optional && partition_ids_hint_) + : BaseMergePredicate(std::move(partition_ids_hint_)) + , queue(queue_) +{ + { + std::lock_guard lock(queue.state_mutex); + prev_virtual_parts = std::make_shared(queue.virtual_parts); + } + + /// Load current quorum status. + auto quorum_status_future = zookeeper->asyncTryGet(fs::path(queue.zookeeper_path) / "quorum" / "status"); + + committing_blocks = std::make_shared(getCommittingBlocks(zookeeper, queue.zookeeper_path, queue.log)); + merges_version = queue_.pullLogsToQueue(zookeeper, {}, ReplicatedMergeTreeQueue::MERGE_PREDICATE); { @@ -2179,7 +2192,8 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( /// /// If pinned parts are fetched after logs are pulled then we can safely say that it contains all locks up to `merges_version`. 
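// A sketch (assumed helper names, not from this patch) of the znode-name parsing
// getCommittingBlocks() performs above: each in-flight insert or mutation holds an
// ephemeral node called "block-<number>" under block_numbers/<partition>, and only
// the numeric suffix is of interest.

#include <charconv>
#include <optional>
#include <string_view>

static std::optional<long> parseBlockNumber(std::string_view entry)
{
    constexpr std::string_view prefix = "block-";
    if (!entry.starts_with(prefix))
        return std::nullopt;  // some other node, e.g. a leftover lock
    long value = 0;
    const auto digits = entry.substr(prefix.size());
    const auto [ptr, ec] = std::from_chars(digits.data(), digits.data() + digits.size(), value);
    if (ec != std::errc() || ptr != digits.data() + digits.size())
        return std::nullopt;  // malformed suffix
    return value;
}

int main()
{
    return parseBlockNumber("block-0000012345") == 12345 ? 0 : 1;
}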
String s = zookeeper->get(queue.zookeeper_path + "/pinned_part_uuids"); - pinned_part_uuids.fromString(s); + pinned_part_uuids = std::make_shared(); + pinned_part_uuids->fromString(s); } Coordination::GetResponse quorum_status_response = quorum_status_future.get(); @@ -2187,13 +2201,21 @@ ReplicatedMergeTreeMergePredicate::ReplicatedMergeTreeMergePredicate( { ReplicatedMergeTreeQuorumEntry quorum_status; quorum_status.fromString(quorum_status_response.data); - inprogress_quorum_part = quorum_status.part_name; + inprogress_quorum_part = std::make_shared(quorum_status.part_name); } - else - inprogress_quorum_part.clear(); + + /// Use all information about parts + prev_virtual_parts_ = prev_virtual_parts.get(); + virtual_parts_ = &queue.virtual_parts; + committing_blocks_ = committing_blocks.get(); + pinned_part_uuids_ = pinned_part_uuids.get(); + inprogress_quorum_part_ = inprogress_quorum_part.get(); + mutations_state_ = &queue; + virtual_parts_mutex = &queue.state_mutex; } -bool LocalMergePredicate::operator()( +template +bool BaseMergePredicate::operator()( const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right, const MergeTreeTransaction *, @@ -2205,20 +2227,8 @@ bool LocalMergePredicate::operator()( return canMergeSinglePart(right, out_reason); } -bool ReplicatedMergeTreeMergePredicate::operator()( - const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - const MergeTreeTransaction *, - String * out_reason) const -{ - if (left) - return canMergeTwoParts(left, right, out_reason); - else - return canMergeSinglePart(right, out_reason); -} - - -bool ReplicatedMergeTreeMergePredicate::canMergeTwoParts( +template +bool BaseMergePredicate::canMergeTwoParts( const MergeTreeData::DataPartPtr & left, const MergeTreeData::DataPartPtr & right, String * out_reason) const @@ -2263,21 +2273,21 @@ bool ReplicatedMergeTreeMergePredicate::canMergeTwoParts( for (const MergeTreeData::DataPartPtr & part : {left, right}) { - if (pinned_part_uuids.part_uuids.contains(part->uuid)) + if (pinned_part_uuids_ && pinned_part_uuids_->part_uuids.contains(part->uuid)) { if (out_reason) *out_reason = "Part " + part->name + " has uuid " + toString(part->uuid) + " which is currently pinned"; return false; } - if (part->name == inprogress_quorum_part) + if (inprogress_quorum_part_ && part->name == *inprogress_quorum_part_) { if (out_reason) *out_reason = "Quorum insert for part " + part->name + " is currently in progress"; return false; } - if (prev_virtual_parts.getContainingPart(part->info).empty()) + if (prev_virtual_parts_ && prev_virtual_parts_->getContainingPart(part->info).empty()) { if (out_reason) *out_reason = "Entry for part " + part->name + " hasn't been read from the replication log yet"; @@ -2290,7 +2300,7 @@ bool ReplicatedMergeTreeMergePredicate::canMergeTwoParts( if (left_max_block > right_min_block) std::swap(left_max_block, right_min_block); - if (left_max_block + 1 < right_min_block) + if (committing_blocks_ && left_max_block + 1 < right_min_block) { if (partition_ids_hint && !partition_ids_hint->contains(left->info.partition_id)) { @@ -2299,8 +2309,8 @@ bool ReplicatedMergeTreeMergePredicate::canMergeTwoParts( return false; } - auto committing_blocks_in_partition = committing_blocks.find(left->info.partition_id); - if (committing_blocks_in_partition != committing_blocks.end()) + auto committing_blocks_in_partition = committing_blocks_->find(left->info.partition_id); + if (committing_blocks_in_partition != committing_blocks_->end()) { const 
std::set & block_numbers = committing_blocks_in_partition->second; @@ -2316,109 +2326,107 @@ bool ReplicatedMergeTreeMergePredicate::canMergeTwoParts( } } - return nested_pred.canMergeTwoParts(left, right, out_reason); -} + std::unique_lock lock; + if (virtual_parts_mutex) + lock = std::unique_lock(*virtual_parts_mutex); -bool LocalMergePredicate::canMergeTwoParts( - const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - String * out_reason) const -{ - Int64 left_max_block = left->info.max_block; - Int64 right_min_block = right->info.min_block; - - std::lock_guard lock(queue.state_mutex); - - for (const MergeTreeData::DataPartPtr & part : {left, right}) + if (virtual_parts_) { - /// We look for containing parts in queue.virtual_parts (and not in prev_virtual_parts) because queue.virtual_parts is newer - /// and it is guaranteed that it will contain all merges assigned before this object is constructed. - String containing_part = queue.virtual_parts.getContainingPart(part->info); - if (containing_part != part->name) + for (const MergeTreeData::DataPartPtr & part : {left, right}) { - if (out_reason) - *out_reason = "Part " + part->name + " has already been assigned a merge into " + containing_part; - return false; + /// We look for containing parts in queue.virtual_parts (and not in prev_virtual_parts) because queue.virtual_parts is newer + /// and it is guaranteed that it will contain all merges assigned before this object is constructed. + String containing_part = virtual_parts_->getContainingPart(part->info); + if (containing_part != part->name) + { + if (out_reason) + *out_reason = "Part " + part->name + " has already been assigned a merge into " + containing_part; + return false; + } + } + + if (left_max_block + 1 < right_min_block) + { + /// Fake part which will appear as merge result + MergeTreePartInfo gap_part_info( + left->info.partition_id, left_max_block + 1, right_min_block - 1, + MergeTreePartInfo::MAX_LEVEL, MergeTreePartInfo::MAX_BLOCK_NUMBER); + + /// We don't select parts if any smaller part covered by our merge must exist after + /// processing replication log up to log_pointer. + Strings covered = virtual_parts_->getPartsCoveredBy(gap_part_info); + if (!covered.empty()) + { + if (out_reason) + *out_reason = "There are " + toString(covered.size()) + " parts (from " + covered.front() + + " to " + covered.back() + ") that are still not present or being processed by " + + "some other background process on this replica between " + left->name + " and " + right->name; + return false; + } } } - if (left_max_block + 1 < right_min_block) + if (mutations_state_) { - /// Fake part which will appear as merge result - MergeTreePartInfo gap_part_info( - left->info.partition_id, left_max_block + 1, right_min_block - 1, - MergeTreePartInfo::MAX_LEVEL, MergeTreePartInfo::MAX_BLOCK_NUMBER); + Int64 left_mutation_ver = mutations_state_->getCurrentMutationVersion( + left->info.partition_id, left->info.getDataVersion()); - /// We don't select parts if any smaller part covered by our merge must exist after - /// processing replication log up to log_pointer. 
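// A compact restatement, under assumed simplified types, of the committing-block
// test performed by canMergeTwoParts() above: a merge spanning the numeric gap
// between two parts must wait if any not-yet-committed insert in that partition
// has a block number strictly inside the gap.

#include <cassert>
#include <set>

static bool insertPendingInGap(const std::set<long> & committing_blocks, long left_max_block, long right_min_block)
{
    auto it = committing_blocks.upper_bound(left_max_block);  // first block > left_max_block
    return it != committing_blocks.end() && *it < right_min_block;
}

int main()
{
    std::set<long> committing{105, 220};
    assert(insertPendingInGap(committing, 100, 200));   // block 105 sits in the gap
    assert(!insertPendingInGap(committing, 220, 230));  // nothing strictly between
}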
- Strings covered = queue.virtual_parts.getPartsCoveredBy(gap_part_info); - if (!covered.empty()) + Int64 right_mutation_ver = mutations_state_->getCurrentMutationVersion( + left->info.partition_id, right->info.getDataVersion()); + + if (left_mutation_ver != right_mutation_ver) { if (out_reason) - *out_reason = "There are " + toString(covered.size()) + " parts (from " + covered.front() - + " to " + covered.back() + ") that are still not present or being processed by " - + " other background process on this replica between " + left->name + " and " + right->name; + *out_reason = "Current mutation versions of parts " + left->name + " and " + right->name + " differ: " + + toString(left_mutation_ver) + " and " + toString(right_mutation_ver) + " respectively"; return false; } } - Int64 left_mutation_ver = queue.getCurrentMutationVersion( - left->info.partition_id, left->info.getDataVersion(), lock); - - Int64 right_mutation_ver = queue.getCurrentMutationVersion( - left->info.partition_id, right->info.getDataVersion(), lock); - - if (left_mutation_ver != right_mutation_ver) - { - if (out_reason) - *out_reason = "Current mutation versions of parts " + left->name + " and " + right->name + " differ: " - + toString(left_mutation_ver) + " and " + toString(right_mutation_ver) + " respectively"; - return false; - } - return MergeTreeData::partsContainSameProjections(left, right); } -bool ReplicatedMergeTreeMergePredicate::canMergeSinglePart( +template +bool BaseMergePredicate::canMergeSinglePart( const MergeTreeData::DataPartPtr & part, String * out_reason) const { - if (pinned_part_uuids.part_uuids.contains(part->uuid)) + if (pinned_part_uuids_ && pinned_part_uuids_->part_uuids.contains(part->uuid)) { if (out_reason) *out_reason = fmt::format("Part {} has uuid {} which is currently pinned", part->name, part->uuid); return false; } - if (part->name == inprogress_quorum_part) + if (inprogress_quorum_part_ && part->name == *inprogress_quorum_part_) { if (out_reason) *out_reason = fmt::format("Quorum insert for part {} is currently in progress", part->name); return false; } - if (prev_virtual_parts.getContainingPart(part->info).empty()) + if (prev_virtual_parts_ && prev_virtual_parts_->getContainingPart(part->info).empty()) { if (out_reason) *out_reason = fmt::format("Entry for part {} hasn't been read from the replication log yet", part->name); return false; } - return nested_pred.canMergeSinglePart(part, out_reason); -} + std::unique_lock lock; + if (virtual_parts_mutex) + lock = std::unique_lock(*virtual_parts_mutex); -bool LocalMergePredicate::canMergeSinglePart(const MergeTreeData::DataPartPtr & part, String * out_reason) const -{ - std::lock_guard lock(queue.state_mutex); - - /// We look for containing parts in queue.virtual_parts (and not in prev_virtual_parts) because queue.virtual_parts is newer - /// and it is guaranteed that it will contain all merges assigned before this object is constructed. - String containing_part = queue.virtual_parts.getContainingPart(part->info); - if (containing_part != part->name) + if (virtual_parts_) { - if (out_reason) - *out_reason = fmt::format("Part {} has already been assigned a merge into {}", part->name, containing_part); - return false; + /// We look for containing parts in queue.virtual_parts (and not in prev_virtual_parts) because queue.virtual_parts is newer + /// and it is guaranteed that it will contain all merges assigned before this object is constructed. 
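// A sketch of the rule enforced just above: two parts may be merged only if the
// same set of mutations applies to both. The lookup below loosely models
// getCurrentMutationVersion() as described in the header comment (latest mutation
// whose version does not exceed the part's data version, else 0); the types are
// illustrative.

#include <cassert>
#include <iterator>
#include <map>

static long currentMutationVersion(const std::map<long, const char *> & mutations_by_version, long data_version)
{
    auto it = mutations_by_version.upper_bound(data_version);
    if (it == mutations_by_version.begin())
        return 0;  // no mutation applies to this part yet
    return std::prev(it)->first;
}

int main()
{
    std::map<long, const char *> mutations{{7, "m7"}, {12, "m12"}};
    assert(currentMutationVersion(mutations, 9) == 7);   // m7 applied, m12 still pending
    assert(currentMutationVersion(mutations, 3) == 0);   // nothing applied yet
    // Parts with data versions 9 and 3 therefore cannot be merged together.
}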
+ String containing_part = virtual_parts_->getContainingPart(part->info); + if (containing_part != part->name) + { + if (out_reason) + *out_reason = fmt::format("Part {} has already been assigned a merge into {}", part->name, containing_part); + return false; + } } return true; @@ -2459,7 +2467,7 @@ std::optional> ReplicatedMergeTreeMergePredicate::getDesir /// We cannot mutate part if it's being inserted with quorum and it's not /// already reached. - if (part->name == inprogress_quorum_part) + if (inprogress_quorum_part && part->name == *inprogress_quorum_part) return {}; std::lock_guard lock(queue.state_mutex); @@ -2474,7 +2482,7 @@ std::optional> ReplicatedMergeTreeMergePredicate::getDesir UInt64 mutations_limit = queue.storage.getSettings()->replicated_max_mutations_in_one_entry; UInt64 mutations_count = 0; - Int64 current_version = queue.getCurrentMutationVersion(part->info.partition_id, part->info.getDataVersion(), lock); + Int64 current_version = queue.getCurrentMutationVersion(part->info.partition_id, part->info.getDataVersion()); Int64 max_version = in_partition->second.begin()->first; int alter_version = -1; @@ -2548,8 +2556,8 @@ bool ReplicatedMergeTreeMergePredicate::isMutationFinished(const std::string & z if (partition_ids_hint && !partition_ids_hint->contains(partition_id)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Partition id {} was not provided as hint, it's a bug", partition_id); - auto partition_it = committing_blocks.find(partition_id); - if (partition_it != committing_blocks.end()) + auto partition_it = committing_blocks->find(partition_id); + if (partition_it != committing_blocks->end()) { size_t blocks_count = std::distance( partition_it->second.begin(), partition_it->second.lower_bound(block_num)); @@ -2671,4 +2679,6 @@ void ReplicatedMergeTreeQueue::removeCurrentPartsFromMutations() removeCoveredPartsFromMutations(part_name, /*remove_part = */ false, /*remove_covered_parts = */ true); } +template class BaseMergePredicate; + } diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h index 79572e13963..f205526a660 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.h @@ -34,6 +34,7 @@ private: friend class CurrentlyExecuting; friend class LocalMergePredicate; friend class ReplicatedMergeTreeMergePredicate; + template friend class BaseMergePredicate; friend class MergeFromLogEntryTask; friend class ReplicatedMergeMutateTaskBase; @@ -212,7 +213,7 @@ private: /// with getDataVersion() == data_version. (Either this mutation was already applied or the part /// was created after the mutation). /// If there is no such mutation or it has already been executed and deleted, return 0. - Int64 getCurrentMutationVersion(const String & partition_id, Int64 data_version, std::lock_guard & /* state_lock */) const; + Int64 getCurrentMutationVersion(const String & partition_id, Int64 data_version) const; /** Check that part isn't in currently generating parts and isn't covered by them. * Should be called under state_mutex. @@ -491,33 +492,14 @@ public: void createLogEntriesToFetchBrokenParts(); }; -/// Lightweight version of ReplicatedMergeTreeMergePredicate that do not make any ZooKeeper requests, -/// but may return false-positive results. Checks only a subset of required conditions. 
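// A minimal model (illustrative names) of why the explicit 'template class
// BaseMergePredicate...;' instantiation added at the end of
// ReplicatedMergeTreeQueue.cpp above is required: the template's member functions
// are now defined in the .cpp file, so that translation unit must materialize
// every instantiation other code links against.

// pred.h equivalent: declaration only, no member definitions.
template <typename T>
struct Pred
{
    bool ok(const T & x) const;
};

// pred.cpp equivalent: definitions plus one explicit instantiation.
template <typename T>
bool Pred<T>::ok(const T & x) const
{
    return x >= T{};  // placeholder check
}

template struct Pred<int>;  // emits Pred<int>::ok here; users only need the header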
-class LocalMergePredicate +using CommittingBlocks = std::unordered_map>; + +template +class BaseMergePredicate { public: - LocalMergePredicate(ReplicatedMergeTreeQueue & queue_); - - bool operator()(const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - const MergeTreeTransaction * txn, - String * out_reason = nullptr) const; - - bool canMergeTwoParts(const MergeTreeData::DataPartPtr & left, - const MergeTreeData::DataPartPtr & right, - String * out_reason = nullptr) const; - - bool canMergeSinglePart(const MergeTreeData::DataPartPtr & part, String * out_reason) const; - -private: - const ReplicatedMergeTreeQueue & queue; -}; - -class ReplicatedMergeTreeMergePredicate -{ -public: - ReplicatedMergeTreeMergePredicate(ReplicatedMergeTreeQueue & queue_, zkutil::ZooKeeperPtr & zookeeper, - std::optional && partition_ids_hint_); + BaseMergePredicate() = default; + BaseMergePredicate(std::optional && partition_ids_hint_) : partition_ids_hint(std::move(partition_ids_hint_)) {} /// Depending on the existence of left part checks a merge predicate for two parts or for single part. bool operator()(const MergeTreeData::DataPartPtr & left, @@ -537,6 +519,46 @@ public: /// This predicate is checked for the first part of each range. bool canMergeSinglePart(const MergeTreeData::DataPartPtr & part, String * out_reason) const; + CommittingBlocks getCommittingBlocks(zkutil::ZooKeeperPtr & zookeeper, const std::string & zookeeper_path, Poco::Logger * log_); + +protected: + /// A list of partitions that can be used in the merge predicate + std::optional partition_ids_hint; + + /// A snapshot of active parts that would appear if the replica executes all log entries in its queue. + const VirtualPartsT * prev_virtual_parts_ = nullptr; + const VirtualPartsT * virtual_parts_ = nullptr; + + /// partition ID -> block numbers of the inserts and mutations that are about to commit + /// (loaded at some later time than prev_virtual_parts). + const CommittingBlocks * committing_blocks_ = nullptr; + + /// List of UUIDs for parts that have their identity "pinned". + const PinnedPartUUIDs * pinned_part_uuids_ = nullptr; + + /// Quorum state taken at some later time than prev_virtual_parts. + const String * inprogress_quorum_part_ = nullptr; + + /// An object that provides current mutation version for a part + const MutationsStateT * mutations_state_ = nullptr; + + std::mutex * virtual_parts_mutex = nullptr; +}; + +/// Lightweight version of ReplicatedMergeTreeMergePredicate that does not make any ZooKeeper requests, +/// but may return false-positive results. Checks only a subset of required conditions. +class LocalMergePredicate : public BaseMergePredicate +{ +public: + LocalMergePredicate(ReplicatedMergeTreeQueue & queue_); +}; + +class ReplicatedMergeTreeMergePredicate : public BaseMergePredicate +{ +public: + ReplicatedMergeTreeMergePredicate(ReplicatedMergeTreeQueue & queue_, zkutil::ZooKeeperPtr & zookeeper, + std::optional && partition_ids_hint_); + /// Returns true if part is needed for some REPLACE_RANGE entry. /// We should not drop part in this case, because replication queue may stuck without that part. 
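// A minimal sketch (simplified types) of the optional-input convention the
// BaseMergePredicate members above rely on: every piece of state is a pointer
// that may be null, and a null input simply disables that family of checks. This
// is how the lightweight LocalMergePredicate and the full
// ReplicatedMergeTreeMergePredicate share one implementation.

#include <string>

struct PredicateInputs
{
    const std::string * inprogress_quorum_part = nullptr;  // left null by the local predicate
};

static bool canMergeSketch(const PredicateInputs & in, const std::string & part_name, std::string * out_reason)
{
    if (in.inprogress_quorum_part && part_name == *in.inprogress_quorum_part)
    {
        if (out_reason)
            *out_reason = "Quorum insert for part " + part_name + " is currently in progress";
        return false;
    }
    return true;  // the real predicate goes on to virtual parts, pinned UUIDs, mutations, ...
}

int main()
{
    std::string reason;
    PredicateInputs local;                           // no quorum information: check skipped
    bool ok = canMergeSketch(local, "all_1_1_0", &reason);

    std::string quorum_part = "all_1_1_0";
    PredicateInputs full{&quorum_part};              // quorum information present: check applies
    bool blocked = !canMergeSketch(full, "all_1_1_0", &reason);

    return (ok && blocked) ? 0 : 1;
}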
bool partParticipatesInReplaceRange(const MergeTreeData::DataPartPtr & part, String * out_reason) const; @@ -561,28 +583,17 @@ public: String getCoveringVirtualPart(const String & part_name) const; private: - LocalMergePredicate nested_pred; - const ReplicatedMergeTreeQueue & queue; - std::optional partition_ids_hint; - - /// A snapshot of active parts that would appear if the replica executes all log entries in its queue. - ActiveDataPartSet prev_virtual_parts; - /// partition ID -> block numbers of the inserts and mutations that are about to commit - /// (loaded at some later time than prev_virtual_parts). - std::unordered_map> committing_blocks; - - /// List of UUIDs for parts that have their identity "pinned". - PinnedPartUUIDs pinned_part_uuids; - - /// Quorum state taken at some later time than prev_virtual_parts. - String inprogress_quorum_part; + /// We copy a merge predicate when we cast it to AllowedMergingPredicate, let's keep the pointers valid + std::shared_ptr prev_virtual_parts; + std::shared_ptr committing_blocks; + std::shared_ptr pinned_part_uuids; + std::shared_ptr inprogress_quorum_part; int32_t merges_version = -1; }; - /** Convert a number to a string in the format of the suffixes of auto-incremental nodes in ZooKeeper. * Negative numbers are also supported - for them the name of the node looks somewhat silly * and does not match any auto-incremented node in ZK. diff --git a/src/Storages/MergeTree/RequestResponse.cpp b/src/Storages/MergeTree/RequestResponse.cpp index 05930d5a4c4..2ce0e20dcd2 100644 --- a/src/Storages/MergeTree/RequestResponse.cpp +++ b/src/Storages/MergeTree/RequestResponse.cpp @@ -51,7 +51,7 @@ String ParallelReadRequest::describe() const return result; } -void ParallelReadRequest::deserialize(ReadBuffer & in) +ParallelReadRequest ParallelReadRequest::deserialize(ReadBuffer & in) { UInt64 version; readIntBinary(version, in); @@ -60,12 +60,24 @@ void ParallelReadRequest::deserialize(ReadBuffer & in) "from replicas differ. Got: {}, supported version: {}", version, DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION); + CoordinationMode mode; + size_t replica_num; + size_t min_number_of_marks; + RangesInDataPartsDescription description; + uint8_t mode_candidate; readIntBinary(mode_candidate, in); mode = validateAndGet(mode_candidate); readIntBinary(replica_num, in); readIntBinary(min_number_of_marks, in); description.deserialize(in); + + return ParallelReadRequest( + mode, + replica_num, + min_number_of_marks, + std::move(description) + ); } void ParallelReadRequest::merge(ParallelReadRequest & other) @@ -125,7 +137,7 @@ String InitialAllRangesAnnouncement::describe() return result; } -void InitialAllRangesAnnouncement::deserialize(ReadBuffer & in) +InitialAllRangesAnnouncement InitialAllRangesAnnouncement::deserialize(ReadBuffer & in) { UInt64 version; readIntBinary(version, in); @@ -134,11 +146,21 @@ void InitialAllRangesAnnouncement::deserialize(ReadBuffer & in) "from replicas differ. 
Got: {}, supported version: {}", version, DBMS_PARALLEL_REPLICAS_PROTOCOL_VERSION); + CoordinationMode mode; + RangesInDataPartsDescription description; + size_t replica_num; + uint8_t mode_candidate; readIntBinary(mode_candidate, in); mode = validateAndGet(mode_candidate); description.deserialize(in); readIntBinary(replica_num, in); + + return InitialAllRangesAnnouncement { + mode, + description, + replica_num + }; } } diff --git a/src/Storages/MergeTree/RequestResponse.h b/src/Storages/MergeTree/RequestResponse.h index 7e5563c0553..3a5bfde6c20 100644 --- a/src/Storages/MergeTree/RequestResponse.h +++ b/src/Storages/MergeTree/RequestResponse.h @@ -40,21 +40,40 @@ struct PartBlockRange } }; +/// ParallelReadRequest is used by remote replicas during parallel read +/// to signal an initiator that they need more marks to read. struct ParallelReadRequest { + /// No default constructor, you must initialize all fields at once. + + ParallelReadRequest( + CoordinationMode mode_, + size_t replica_num_, + size_t min_number_of_marks_, + RangesInDataPartsDescription description_) + : mode(mode_) + , replica_num(replica_num_) + , min_number_of_marks(min_number_of_marks_) + , description(std::move(description_)) + {} + CoordinationMode mode; size_t replica_num; size_t min_number_of_marks; - - /// Extension for ordered mode + /// Extension for Ordered (InOrder or ReverseOrder) mode + /// Contains only data part names without mark ranges. RangesInDataPartsDescription description; void serialize(WriteBuffer & out) const; String describe() const; - void deserialize(ReadBuffer & in); + static ParallelReadRequest deserialize(ReadBuffer & in); void merge(ParallelReadRequest & other); }; +/// ParallelReadResponse is used by an initiator to tell +/// remote replicas about what to read during parallel reading. +/// Additionally contains information whether there are more available +/// marks to read (whether it is the last packet or not). struct ParallelReadResponse { bool finish{false}; @@ -66,15 +85,30 @@ struct ParallelReadResponse }; +/// The set of parts (their names) along with ranges to read which is sent back +/// to the initiator by remote replicas during parallel reading. +/// Additionally contains an identifier (replica_num) plus +/// the reading algorithm chosen (Default, InOrder or ReverseOrder). struct InitialAllRangesAnnouncement { + /// No default constructor, you must initialize all fields at once. 
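// A self-contained sketch (toy buffer and message, assumed names) of the
// static-factory deserialization style ParallelReadRequest::deserialize and
// InitialAllRangesAnnouncement::deserialize adopt in this patch: with no default
// constructor, a half-initialized request can never exist -- either every field
// is read and the object is returned, or the function throws first.

#include <cstdint>
#include <stdexcept>
#include <vector>

struct Buf
{
    std::vector<uint64_t> words;
    size_t pos = 0;
    uint64_t next() { return words.at(pos++); }
};

struct Message
{
    uint64_t version;
    uint64_t replica_num;

    Message(uint64_t version_, uint64_t replica_num_) : version(version_), replica_num(replica_num_) {}

    static Message deserialize(Buf & in)
    {
        const uint64_t version = in.next();
        if (version != 1)
            throw std::runtime_error("unsupported protocol version");
        const uint64_t replica_num = in.next();
        return Message(version, replica_num);  // all fields set in one step
    }
};

int main()
{
    Buf in{{1, 3}};
    const Message m = Message::deserialize(in);
    return m.replica_num == 3 ? 0 : 1;
}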
+ + InitialAllRangesAnnouncement( + CoordinationMode mode_, + RangesInDataPartsDescription description_, + size_t replica_num_) + : mode(mode_) + , description(description_) + , replica_num(replica_num_) + {} + CoordinationMode mode; RangesInDataPartsDescription description; size_t replica_num; void serialize(WriteBuffer & out) const; String describe(); - void deserialize(ReadBuffer & in); + static InitialAllRangesAnnouncement deserialize(ReadBuffer & in); }; diff --git a/src/Storages/SelectQueryInfo.cpp b/src/Storages/SelectQueryInfo.cpp new file mode 100644 index 00000000000..665da7fee70 --- /dev/null +++ b/src/Storages/SelectQueryInfo.cpp @@ -0,0 +1,16 @@ +#include +#include + +namespace DB +{ + +bool SelectQueryInfo::isFinal() const +{ + if (table_expression_modifiers) + return table_expression_modifiers->hasFinal(); + + const auto & select = query->as(); + return select.final(); +} + +} diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index b3dfd44b2ad..8fbc64b7a24 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -265,5 +265,7 @@ struct SelectQueryInfo { return input_order_info ? input_order_info : (projection ? projection->input_order_info : nullptr); } + + bool isFinal() const; }; } diff --git a/src/Storages/StorageAzureBlob.cpp b/src/Storages/StorageAzureBlob.cpp index 133dbb6740f..0880d058cb5 100644 --- a/src/Storages/StorageAzureBlob.cpp +++ b/src/Storages/StorageAzureBlob.cpp @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -624,19 +625,19 @@ Pipe StorageAzureBlob::read( requested_virtual_columns.push_back(virtual_column); } - std::shared_ptr iterator_wrapper; + std::shared_ptr iterator_wrapper; if (configuration.withGlobs()) { /// Iterate through disclosed globs and make a source for each file - iterator_wrapper = std::make_shared( - object_storage.get(), configuration.container, std::nullopt, - configuration.blob_path, query_info.query, virtual_block, local_context, nullptr); + iterator_wrapper = std::make_shared( + object_storage.get(), configuration.container, configuration.blob_path, + query_info.query, virtual_block, local_context, nullptr); } else { - iterator_wrapper = std::make_shared( + iterator_wrapper = std::make_shared( object_storage.get(), configuration.container, configuration.blobs_paths, - std::nullopt, query_info.query, virtual_block, local_context, nullptr); + query_info.query, virtual_block, local_context, nullptr); } ColumnsDescription columns_description; @@ -799,202 +800,129 @@ static void addPathToVirtualColumns(Block & block, const String & path, size_t i block.getByName("_idx").column->assumeMutableRef().insert(idx); } -StorageAzureBlobSource::Iterator::Iterator( +StorageAzureBlobSource::GlobIterator::GlobIterator( AzureObjectStorage * object_storage_, const std::string & container_, - std::optional keys_, - std::optional blob_path_with_globs_, + String blob_path_with_globs_, ASTPtr query_, const Block & virtual_header_, ContextPtr context_, RelativePathsWithMetadata * outer_blobs_) - : WithContext(context_) + : IIterator(context_) , object_storage(object_storage_) , container(container_) - , keys(keys_) , blob_path_with_globs(blob_path_with_globs_) , query(query_) , virtual_header(virtual_header_) , outer_blobs(outer_blobs_) { - if (keys.has_value() && blob_path_with_globs.has_value()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify keys and glob simultaneously it's a bug"); - if (!keys.has_value() && !blob_path_with_globs.has_value()) - throw 
Exception(ErrorCodes::LOGICAL_ERROR, "Both keys and glob mask are not specified"); + const String key_prefix = blob_path_with_globs.substr(0, blob_path_with_globs.find_first_of("*?{")); - if (keys) + /// We don't have to list bucket, because there is no asterisks. + if (key_prefix.size() == blob_path_with_globs.size()) { - Strings all_keys = *keys; - - blobs_with_metadata.emplace(); - /// Create a virtual block with one row to construct filter - if (query && virtual_header && !all_keys.empty()) - { - /// Append "idx" column as the filter result - virtual_header.insert({ColumnUInt64::create(), std::make_shared(), "_idx"}); - - auto block = virtual_header.cloneEmpty(); - addPathToVirtualColumns(block, fs::path(container) / all_keys.front(), 0); - - VirtualColumnUtils::prepareFilterBlockWithQuery(query, getContext(), block, filter_ast); - - if (filter_ast) - { - block = virtual_header.cloneEmpty(); - for (size_t i = 0; i < all_keys.size(); ++i) - addPathToVirtualColumns(block, fs::path(container) / all_keys[i], i); - - VirtualColumnUtils::filterBlockWithQuery(query, block, getContext(), filter_ast); - const auto & idxs = typeid_cast(*block.getByName("_idx").column); - - Strings filtered_keys; - filtered_keys.reserve(block.rows()); - for (UInt64 idx : idxs.getData()) - filtered_keys.emplace_back(std::move(all_keys[idx])); - - all_keys = std::move(filtered_keys); - } - } - - for (auto && key : all_keys) - { - ObjectMetadata object_metadata = object_storage->getObjectMetadata(key); - total_size += object_metadata.size_bytes; - blobs_with_metadata->emplace_back(RelativePathWithMetadata{key, object_metadata}); - if (outer_blobs) - outer_blobs->emplace_back(blobs_with_metadata->back()); - } - } - else - { - const String key_prefix = blob_path_with_globs->substr(0, blob_path_with_globs->find_first_of("*?{")); - - /// We don't have to list bucket, because there is no asterisks. - if (key_prefix.size() == blob_path_with_globs->size()) - { - ObjectMetadata object_metadata = object_storage->getObjectMetadata(*blob_path_with_globs); - blobs_with_metadata->emplace_back(*blob_path_with_globs, object_metadata); - if (outer_blobs) - outer_blobs->emplace_back(blobs_with_metadata->back()); - return; - } - - object_storage_iterator = object_storage->iterate(key_prefix); - - matcher = std::make_unique(makeRegexpPatternFromGlobs(*blob_path_with_globs)); - - if (!matcher->ok()) - throw Exception(ErrorCodes::CANNOT_COMPILE_REGEXP, - "Cannot compile regex from glob ({}): {}", *blob_path_with_globs, matcher->error()); - - recursive = *blob_path_with_globs == "/**" ? true : false; + ObjectMetadata object_metadata = object_storage->getObjectMetadata(blob_path_with_globs); + blobs_with_metadata.emplace_back(blob_path_with_globs, object_metadata); + if (outer_blobs) + outer_blobs->emplace_back(blobs_with_metadata.back()); + return; } + object_storage_iterator = object_storage->iterate(key_prefix); + + matcher = std::make_unique(makeRegexpPatternFromGlobs(blob_path_with_globs)); + + if (!matcher->ok()) + throw Exception( + ErrorCodes::CANNOT_COMPILE_REGEXP, "Cannot compile regex from glob ({}): {}", blob_path_with_globs, matcher->error()); + + recursive = blob_path_with_globs == "/**" ? 
true : false; } -RelativePathWithMetadata StorageAzureBlobSource::Iterator::next() +RelativePathWithMetadata StorageAzureBlobSource::GlobIterator::next() { + std::lock_guard lock(next_mutex); + if (is_finished) return {}; - if (keys) + bool need_new_batch = blobs_with_metadata.empty() || index >= blobs_with_metadata.size(); + + if (need_new_batch) { - size_t current_index = index.fetch_add(1, std::memory_order_relaxed); - if (current_index >= blobs_with_metadata->size()) + RelativePathsWithMetadata new_batch; + while (new_batch.empty()) { - is_finished = true; - return {}; - } - - return (*blobs_with_metadata)[current_index]; - } - else - { - bool need_new_batch = false; - { - std::lock_guard lock(next_mutex); - need_new_batch = !blobs_with_metadata || index >= blobs_with_metadata->size(); - } - - if (need_new_batch) - { - RelativePathsWithMetadata new_batch; - while (new_batch.empty()) + auto result = object_storage_iterator->getCurrrentBatchAndScheduleNext(); + if (result.has_value()) { - if (object_storage_iterator->isValid()) - { - new_batch = object_storage_iterator->currentBatch(); - object_storage_iterator->nextBatch(); - } - else - { - is_finished = true; - return {}; - } - - for (auto it = new_batch.begin(); it != new_batch.end();) - { - if (!recursive && !re2::RE2::FullMatch(it->relative_path, *matcher)) - it = new_batch.erase(it); - else - ++it; - } - } - - index.store(0, std::memory_order_relaxed); - if (!is_initialized) - { - createFilterAST(new_batch.front().relative_path); - is_initialized = true; - } - - if (filter_ast) - { - auto block = virtual_header.cloneEmpty(); - for (size_t i = 0; i < new_batch.size(); ++i) - addPathToVirtualColumns(block, fs::path(container) / new_batch[i].relative_path, i); - - VirtualColumnUtils::filterBlockWithQuery(query, block, getContext(), filter_ast); - const auto & idxs = typeid_cast(*block.getByName("_idx").column); - - std::lock_guard lock(next_mutex); - blob_path_with_globs.reset(); - blob_path_with_globs.emplace(); - for (UInt64 idx : idxs.getData()) - { - total_size.fetch_add(new_batch[idx].metadata.size_bytes, std::memory_order_relaxed); - blobs_with_metadata->emplace_back(std::move(new_batch[idx])); - if (outer_blobs) - outer_blobs->emplace_back(blobs_with_metadata->back()); - } + new_batch = result.value(); } else { - if (outer_blobs) - outer_blobs->insert(outer_blobs->end(), new_batch.begin(), new_batch.end()); + is_finished = true; + return {}; + } - std::lock_guard lock(next_mutex); - blobs_with_metadata = std::move(new_batch); - for (const auto & [_, info] : *blobs_with_metadata) - total_size.fetch_add(info.size_bytes, std::memory_order_relaxed); + for (auto it = new_batch.begin(); it != new_batch.end();) + { + if (!recursive && !re2::RE2::FullMatch(it->relative_path, *matcher)) + it = new_batch.erase(it); + else + ++it; } } - size_t current_index = index.fetch_add(1, std::memory_order_relaxed); + index = 0; + if (!is_initialized) + { + createFilterAST(new_batch.front().relative_path); + is_initialized = true; + } - std::lock_guard lock(next_mutex); - return (*blobs_with_metadata)[current_index]; + if (filter_ast) + { + auto block = virtual_header.cloneEmpty(); + for (size_t i = 0; i < new_batch.size(); ++i) + addPathToVirtualColumns(block, fs::path(container) / new_batch[i].relative_path, i); + + VirtualColumnUtils::filterBlockWithQuery(query, block, getContext(), filter_ast); + const auto & idxs = typeid_cast(*block.getByName("_idx").column); + + blobs_with_metadata.clear(); + for (UInt64 idx : idxs.getData()) + { + 
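// A sketch of the prefix rule GlobIterator applies above: only the part of the
// pattern before the first glob metacharacter can serve as the object-storage
// listing prefix, and everything after it has to be filtered with the compiled
// regex (makeRegexpPatternFromGlobs in the real code).

#include <cassert>
#include <string>

static std::string listingPrefix(const std::string & glob)
{
    return glob.substr(0, glob.find_first_of("*?{"));  // npos -> the whole string
}

int main()
{
    assert(listingPrefix("data/2023/*.parquet") == "data/2023/");
    // No metacharacters at all: the prefix is the full path, so nothing is listed
    // and a single getObjectMetadata() call suffices, as in the code above.
    assert(listingPrefix("data/2023/file.parquet") == "data/2023/file.parquet");
}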
total_size.fetch_add(new_batch[idx].metadata.size_bytes, std::memory_order_relaxed); + blobs_with_metadata.emplace_back(std::move(new_batch[idx])); + if (outer_blobs) + outer_blobs->emplace_back(blobs_with_metadata.back()); + } + } + else + { + if (outer_blobs) + outer_blobs->insert(outer_blobs->end(), new_batch.begin(), new_batch.end()); + + blobs_with_metadata = std::move(new_batch); + for (const auto & [_, info] : blobs_with_metadata) + total_size.fetch_add(info.size_bytes, std::memory_order_relaxed); + } } + + size_t current_index = index++; + if (current_index >= blobs_with_metadata.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Index out of bound for blob metadata"); + return blobs_with_metadata[current_index]; } -size_t StorageAzureBlobSource::Iterator::getTotalSize() const +size_t StorageAzureBlobSource::GlobIterator::getTotalSize() const { return total_size.load(std::memory_order_relaxed); } -void StorageAzureBlobSource::Iterator::createFilterAST(const String & any_key) +void StorageAzureBlobSource::GlobIterator::createFilterAST(const String & any_key) { if (!query || !virtual_header) return; @@ -1009,6 +937,78 @@ void StorageAzureBlobSource::Iterator::createFilterAST(const String & any_key) } +StorageAzureBlobSource::KeysIterator::KeysIterator( + AzureObjectStorage * object_storage_, + const std::string & container_, + Strings keys_, + ASTPtr query_, + const Block & virtual_header_, + ContextPtr context_, + RelativePathsWithMetadata * outer_blobs_) + : IIterator(context_) + , object_storage(object_storage_) + , container(container_) + , query(query_) + , virtual_header(virtual_header_) + , outer_blobs(outer_blobs_) +{ + Strings all_keys = keys_; + + /// Create a virtual block with one row to construct filter + if (query && virtual_header && !all_keys.empty()) + { + /// Append "idx" column as the filter result + virtual_header.insert({ColumnUInt64::create(), std::make_shared(), "_idx"}); + + auto block = virtual_header.cloneEmpty(); + addPathToVirtualColumns(block, fs::path(container) / all_keys.front(), 0); + + VirtualColumnUtils::prepareFilterBlockWithQuery(query, getContext(), block, filter_ast); + + if (filter_ast) + { + block = virtual_header.cloneEmpty(); + for (size_t i = 0; i < all_keys.size(); ++i) + addPathToVirtualColumns(block, fs::path(container) / all_keys[i], i); + + VirtualColumnUtils::filterBlockWithQuery(query, block, getContext(), filter_ast); + const auto & idxs = typeid_cast(*block.getByName("_idx").column); + + Strings filtered_keys; + filtered_keys.reserve(block.rows()); + for (UInt64 idx : idxs.getData()) + filtered_keys.emplace_back(std::move(all_keys[idx])); + + all_keys = std::move(filtered_keys); + } + } + + for (auto && key : all_keys) + { + ObjectMetadata object_metadata = object_storage->getObjectMetadata(key); + total_size += object_metadata.size_bytes; + keys.emplace_back(RelativePathWithMetadata{key, object_metadata}); + } + + if (outer_blobs) + *outer_blobs = keys; +} + +RelativePathWithMetadata StorageAzureBlobSource::KeysIterator::next() +{ + size_t current_index = index.fetch_add(1, std::memory_order_relaxed); + if (current_index >= keys.size()) + return {}; + + return keys[current_index]; +} + +size_t StorageAzureBlobSource::KeysIterator::getTotalSize() const +{ + return total_size.load(std::memory_order_relaxed); +} + + Chunk StorageAzureBlobSource::generate() { while (true) @@ -1095,7 +1095,7 @@ StorageAzureBlobSource::StorageAzureBlobSource( String compression_hint_, AzureObjectStorage * object_storage_, const String & container_, - 
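// The KeysIterator concurrency scheme above, in isolation (simplified types): a
// vector that is immutable after construction plus an atomic cursor lets any
// number of reader threads pull distinct keys without taking a mutex, which is
// why KeysIterator::next() needs no lock while GlobIterator::next() does.

#include <atomic>
#include <optional>
#include <string>
#include <vector>

class SharedCursor
{
public:
    explicit SharedCursor(std::vector<std::string> keys_) : keys(std::move(keys_)) {}

    std::optional<std::string> next()
    {
        const size_t i = index.fetch_add(1, std::memory_order_relaxed);
        if (i >= keys.size())
            return std::nullopt;  // exhausted; every later call also returns empty
        return keys[i];
    }

private:
    const std::vector<std::string> keys;  // never mutated after construction
    std::atomic<size_t> index{0};
};

int main()
{
    SharedCursor cursor({"a.csv", "b.csv"});
    while (auto key = cursor.next())
        (void) *key;  // each key is handed out exactly once, even across threads
}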
std::shared_ptr file_iterator_) + std::shared_ptr file_iterator_) :ISource(getHeader(sample_block_, requested_virtual_columns_)) , WithContext(context_) , requested_virtual_columns(requested_virtual_columns_) @@ -1196,18 +1196,16 @@ ColumnsDescription StorageAzureBlob::getTableStructureFromData( ContextPtr ctx) { RelativePathsWithMetadata read_keys; - std::shared_ptr file_iterator; + std::shared_ptr file_iterator; if (configuration.withGlobs()) { - file_iterator = std::make_shared( - object_storage, configuration.container, std::nullopt, - configuration.blob_path, nullptr, Block{}, ctx, &read_keys); + file_iterator = std::make_shared( + object_storage, configuration.container, configuration.blob_path, nullptr, Block{}, ctx, &read_keys); } else { - file_iterator = std::make_shared( - object_storage, configuration.container, configuration.blobs_paths, - std::nullopt, nullptr, Block{}, ctx, &read_keys); + file_iterator = std::make_shared( + object_storage, configuration.container, configuration.blobs_paths, nullptr, Block{}, ctx, &read_keys); } std::optional columns_from_cache; diff --git a/src/Storages/StorageAzureBlob.h b/src/Storages/StorageAzureBlob.h index 25c791f1700..3d6b0c64998 100644 --- a/src/Storages/StorageAzureBlob.h +++ b/src/Storages/StorageAzureBlob.h @@ -142,36 +142,45 @@ private: class StorageAzureBlobSource : public ISource, WithContext { public: - class Iterator : WithContext + class IIterator : public WithContext { public: - Iterator( + IIterator(ContextPtr context_):WithContext(context_) {} + virtual ~IIterator() = default; + virtual RelativePathWithMetadata next() = 0; + virtual size_t getTotalSize() const = 0; + + RelativePathWithMetadata operator ()() { return next(); } + }; + + class GlobIterator : public IIterator + { + public: + GlobIterator( AzureObjectStorage * object_storage_, const std::string & container_, - std::optional keys_, - std::optional blob_path_with_globs_, + String blob_path_with_globs_, ASTPtr query_, const Block & virtual_header_, ContextPtr context_, RelativePathsWithMetadata * outer_blobs_); - RelativePathWithMetadata next(); - size_t getTotalSize() const; - ~Iterator() = default; + RelativePathWithMetadata next() override; + size_t getTotalSize() const override; + ~GlobIterator() override = default; private: AzureObjectStorage * object_storage; std::string container; - std::optional keys; - std::optional blob_path_with_globs; + String blob_path_with_globs; ASTPtr query; ASTPtr filter_ast; Block virtual_header; - std::atomic index = 0; + size_t index = 0; std::atomic total_size = 0; - std::optional blobs_with_metadata; + RelativePathsWithMetadata blobs_with_metadata; RelativePathsWithMetadata * outer_blobs; ObjectStorageIteratorPtr object_storage_iterator; bool recursive{false}; @@ -179,11 +188,42 @@ public: std::unique_ptr matcher; void createFilterAST(const String & any_key); - std::atomic is_finished = false; - std::atomic is_initialized = false; + bool is_finished = false; + bool is_initialized = false; std::mutex next_mutex; }; + class KeysIterator : public IIterator + { + public: + KeysIterator( + AzureObjectStorage * object_storage_, + const std::string & container_, + Strings keys_, + ASTPtr query_, + const Block & virtual_header_, + ContextPtr context_, + RelativePathsWithMetadata * outer_blobs_); + + RelativePathWithMetadata next() override; + size_t getTotalSize() const override; + ~KeysIterator() override = default; + + private: + AzureObjectStorage * object_storage; + std::string container; + RelativePathsWithMetadata keys; + + 
ASTPtr query; + ASTPtr filter_ast; + Block virtual_header; + + std::atomic index = 0; + std::atomic total_size = 0; + + RelativePathsWithMetadata * outer_blobs; + }; + StorageAzureBlobSource( const std::vector & requested_virtual_columns_, const String & format_, @@ -196,7 +236,7 @@ public: String compression_hint_, AzureObjectStorage * object_storage_, const String & container_, - std::shared_ptr file_iterator_); + std::shared_ptr file_iterator_); ~StorageAzureBlobSource() override; @@ -217,7 +257,7 @@ private: String compression_hint; AzureObjectStorage * object_storage; String container; - std::shared_ptr file_iterator; + std::shared_ptr file_iterator; struct ReaderHolder { diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 0472ce6f832..c46192ab43b 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -88,7 +88,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Storages/StorageDummy.cpp b/src/Storages/StorageDummy.cpp index 4f2fb3883bf..e2396a54acb 100644 --- a/src/Storages/StorageDummy.cpp +++ b/src/Storages/StorageDummy.cpp @@ -5,6 +5,7 @@ #include #include +#include namespace DB { diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index ff67272e542..96306a37750 100644 --- a/src/Storages/StorageFile.cpp +++ b/src/Storages/StorageFile.cpp @@ -23,6 +23,7 @@ #include #include +#include #include #include #include diff --git a/src/Storages/StorageMemory.cpp b/src/Storages/StorageMemory.cpp index 1b45b9ae3f4..6ed0583bd44 100644 --- a/src/Storages/StorageMemory.cpp +++ b/src/Storages/StorageMemory.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -155,7 +156,7 @@ void StorageMemory::read( size_t /*max_block_size*/, size_t num_streams) { - query_plan.addStep(std::make_unique(column_names, storage_snapshot, num_streams, delay_read_for_global_subqueries)); + query_plan.addStep(std::make_unique(column_names, shared_from_this(), storage_snapshot, num_streams, delay_read_for_global_subqueries)); } diff --git a/src/Storages/StorageMemory.h b/src/Storages/StorageMemory.h index c4f4331ca64..acb2be4649b 100644 --- a/src/Storages/StorageMemory.h +++ b/src/Storages/StorageMemory.h @@ -132,6 +132,8 @@ private: std::atomic total_size_rows = 0; bool compress; + + friend class ReadFromMemoryStorageStep; }; } diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index f90539689e6..79369ab4bcb 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -147,7 +147,7 @@ StorageSet::StorageSet( const String & comment, bool persistent_) : StorageSetOrJoinBase{disk_, relative_path_, table_id_, columns_, constraints_, comment, persistent_} - , set(std::make_shared(SizeLimits(), false, true)) + , set(std::make_shared(SizeLimits(), 0, true)) { Block header = getInMemoryMetadataPtr()->getSampleBlock(); set->setHeader(header.getColumnsWithTypeAndName()); @@ -176,7 +176,7 @@ void StorageSet::truncate(const ASTPtr &, const StorageMetadataPtr & metadata_sn Block header = metadata_snapshot->getSampleBlock(); increment = 0; - set = std::make_shared(SizeLimits(), false, true); + set = std::make_shared(SizeLimits(), 0, true); set->setHeader(header.getColumnsWithTypeAndName()); } diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 947881d34c6..e6953afe68e 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -36,6 +36,7 @@ #include #include #include +#include namespace DB diff 
--git a/src/Storages/System/StorageSystemQueryCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp index 3dfc5cf298a..117fb4e8a5c 100644 --- a/src/Storages/System/StorageSystemQueryCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -29,7 +29,7 @@ StorageSystemQueryCache::StorageSystemQueryCache(const StorageID & table_id_) void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - auto query_cache = context->getQueryCache(); + QueryCachePtr query_cache = context->getQueryCache(); if (!query_cache) return; diff --git a/src/Storages/System/StorageSystemZooKeeper.cpp b/src/Storages/System/StorageSystemZooKeeper.cpp index a3f5761a85e..d361a4173c1 100644 --- a/src/Storages/System/StorageSystemZooKeeper.cpp +++ b/src/Storages/System/StorageSystemZooKeeper.cpp @@ -16,12 +16,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -162,6 +164,17 @@ public: } }; +/// Type of path to be fetched +enum class ZkPathType +{ + Exact, /// Fetch all nodes under this path + Prefix, /// Fetch all nodes starting with this prefix, recursively (multiple paths may match prefix) + Recurse, /// Fetch all nodes under this path, recursively +}; + +/// List of paths to be fetched from zookeeper +using Paths = std::deque>; + class ReadFromSystemZooKeeper final : public SourceStepWithFilter { public: @@ -171,11 +184,14 @@ public: void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) override; + void onAddFilterFinish() override; + private: - void fillData(MutableColumns & res_columns) const; + void fillData(MutableColumns & res_columns); std::shared_ptr storage_limits; ContextPtr context; + Paths paths; }; StorageSystemZooKeeper::StorageSystemZooKeeper(const StorageID & table_id_) @@ -247,17 +263,6 @@ NamesAndTypesList StorageSystemZooKeeper::getNamesAndTypes() }; } -/// Type of path to be fetched -enum class ZkPathType -{ - Exact, /// Fetch all nodes under this path - Prefix, /// Fetch all nodes starting with this prefix, recursively (multiple paths may match prefix) - Recurse, /// Fatch all nodes under this path, recursively -}; - -/// List of paths to be feched from zookeeper -using Paths = std::deque>; - static String pathCorrected(const String & path) { String path_corrected; @@ -314,11 +319,12 @@ static void extractPathImpl(const ActionsDAG::Node & node, Paths & res, ContextP if (!column_set) return; - auto set = column_set->getData(); - if (!set || !set->isCreated()) + auto future_set = column_set->getData(); + if (!future_set) return; - if (!set->hasExplicitSetElements()) + auto set = future_set->buildOrderedSetInplace(context); + if (!set || !set->hasExplicitSetElements()) return; set->checkColumnsNumber(1); @@ -415,10 +421,13 @@ static Paths extractPath(const ActionsDAG::NodeRawConstPtrs & filter_nodes, Cont } -void ReadFromSystemZooKeeper::fillData(MutableColumns & res_columns) const +void ReadFromSystemZooKeeper::onAddFilterFinish() { - Paths paths = extractPath(getFilterNodes().nodes, context, context->getSettingsRef().allow_unrestricted_reads_from_keeper); + paths = extractPath(getFilterNodes().nodes, context, context->getSettingsRef().allow_unrestricted_reads_from_keeper); +} +void ReadFromSystemZooKeeper::fillData(MutableColumns & res_columns) +{ zkutil::ZooKeeperPtr zookeeper = context->getZooKeeper(); if (paths.empty()) diff --git a/src/Storages/VirtualColumnUtils.cpp 
b/src/Storages/VirtualColumnUtils.cpp index c38a2b4ed42..907fc0cd22c 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -20,9 +20,18 @@ #include #include +#include +#include +#include +#include +#include +#include + #include #include #include +#include +#include #include @@ -80,25 +89,6 @@ ASTPtr buildWhereExpression(const ASTs & functions) return makeASTFunction("and", functions); } -void buildSets(const ASTPtr & expression, ExpressionAnalyzer & analyzer) -{ - const auto * func = expression->as(); - if (func && functionIsInOrGlobalInOperator(func->name)) - { - const IAST & args = *func->arguments; - const ASTPtr & arg = args.children.at(1); - if (arg->as() || arg->as()) - { - analyzer.tryMakeSetForIndexFromSubquery(arg); - } - } - else - { - for (const auto & child : expression->children) - buildSets(child, analyzer); - } -} - } namespace VirtualColumnUtils @@ -199,9 +189,36 @@ void filterBlockWithQuery(const ASTPtr & query, Block & block, ContextPtr contex /// Let's analyze and calculate the prepared expression. auto syntax_result = TreeRewriter(context).analyze(expression_ast, block.getNamesAndTypesList()); ExpressionAnalyzer analyzer(expression_ast, syntax_result, context); - buildSets(expression_ast, analyzer); ExpressionActionsPtr actions = analyzer.getActions(false /* add alises */, true /* project result */, CompileExpressions::yes); + for (const auto & node : actions->getNodes()) + { + if (node.type == ActionsDAG::ActionType::COLUMN) + { + const ColumnSet * column_set = checkAndGetColumnConstData(node.column.get()); + if (!column_set) + column_set = checkAndGetColumn(node.column.get()); + + if (column_set) + { + auto future_set = column_set->getData(); + if (!future_set->get()) + { + if (auto * set_from_subquery = typeid_cast(future_set.get())) + { + auto plan = set_from_subquery->build(context); + auto builder = plan->buildQueryPipeline(QueryPlanOptimizationSettings::fromContext(context), BuildQueryPipelineSettings::fromContext(context)); + auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*builder)); + pipeline.complete(std::make_shared(Block())); + + CompletedPipelineExecutor executor(pipeline); + executor.execute(); + } + } + } + } + } + Block block_with_filter = block; actions->execute(block_with_filter); diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index fa4730cbe84..1ee7d747fcc 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -1,3 +1,4 @@ + #include #include @@ -15,6 +16,8 @@ #include #include #include +#include +#include namespace DB { @@ -276,11 +279,14 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, temporary_table_expression_node->setTemporaryTableName(temporary_table_name); auto table_out = external_storage->write({}, external_storage->getInMemoryMetadataPtr(), mutable_context, /*async_insert=*/false); - auto io = interpreter.execute(); - io.pipeline.complete(std::move(table_out)); - CompletedPipelineExecutor executor(io.pipeline); - executor.execute(); + auto optimization_settings = QueryPlanOptimizationSettings::fromContext(mutable_context); + auto build_pipeline_settings = BuildQueryPipelineSettings::fromContext(mutable_context); + auto pipeline = QueryPipelineBuilder::getPipeline(std::move(*query_plan.buildQueryPipeline(optimization_settings, build_pipeline_settings))); + + pipeline.complete(std::move(table_out)); + CompletedPipelineExecutor executor(pipeline); + executor.execute(); 
mutable_context->addExternalTable(temporary_table_name, std::move(external_storage_holder)); return temporary_table_expression_node; @@ -291,14 +297,13 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, QueryTreeNodePtr buildQueryTreeForShard(SelectQueryInfo & query_info, QueryTreeNodePtr query_tree_to_modify) { auto & planner_context = query_info.planner_context; - const auto & query_context = planner_context->getQueryContext(); CollectColumnSourceToColumnsVisitor collect_column_source_to_columns_visitor; collect_column_source_to_columns_visitor.visit(query_tree_to_modify); const auto & column_source_to_columns = collect_column_source_to_columns_visitor.getColumnSourceToColumns(); - DistributedProductModeRewriteInJoinVisitor visitor(query_info.planner_context->getQueryContext()); + DistributedProductModeRewriteInJoinVisitor visitor(planner_context->getQueryContext()); visitor.visit(query_tree_to_modify); auto replacement_map = visitor.getReplacementMap(); diff --git a/tests/broken_tests.txt b/tests/analyzer_tech_debt.txt similarity index 97% rename from tests/broken_tests.txt rename to tests/analyzer_tech_debt.txt index b3668b06e21..0872033aed0 100644 --- a/tests/broken_tests.txt +++ b/tests/analyzer_tech_debt.txt @@ -124,3 +124,5 @@ 02534_s3_cluster_insert_select_schema_inference 02765_parallel_replicas_final_modifier 02784_parallel_replicas_automatic_disabling +02581_share_big_sets_between_mutation_tasks_long +02581_share_big_sets_between_multiple_mutations_tasks_long diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index 89066ade2cb..2849759a3ee 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -150,7 +150,7 @@ def main(): os.makedirs(logs_path) run_log_path = os.path.join(logs_path, "run.log") - with TeePopen(run_cmd, run_log_path, timeout=40 * 60) as process: + with TeePopen(run_cmd, run_log_path, timeout=90 * 60) as process: retcode = process.wait() if retcode == 0: logging.info("Run successfully") @@ -214,8 +214,11 @@ def main(): # Refuse other checks to run if fast test failed if state != "success": - if FORCE_TESTS_LABEL in pr_info.labels and state != "error": - print(f"'{FORCE_TESTS_LABEL}' enabled, will report success") + if state == "error": + print("The status is 'error', reporting failure regardless of the labels") + sys.exit(1) + elif FORCE_TESTS_LABEL in pr_info.labels: + print(f"'{FORCE_TESTS_LABEL}' enabled, reporting success") else: sys.exit(1) diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index f77ef330ea2..9279b19b187 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -108,7 +108,7 @@ def get_run_command( env_str = " ".join(envs) volume_with_broken_test = ( - f"--volume={repo_tests_path}/broken_tests.txt:/broken_tests.txt" + f"--volume={repo_tests_path}/analyzer_tech_debt.txt:/analyzer_tech_debt.txt" if "analyzer" in check_name else "" ) diff --git a/tests/ci/tee_popen.py b/tests/ci/tee_popen.py index 4869301785e..7872b489951 100644 --- a/tests/ci/tee_popen.py +++ b/tests/ci/tee_popen.py @@ -11,7 +11,7 @@ import os import sys -# Very simple tee logic implementation. You can specify shell command, output +# Very simple tee logic implementation. You can specify a shell command, output # logfile and env variables. After TeePopen is created you can only wait until # it finishes. stderr and stdout will be redirected both to specified file and # stdout. 
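The TeePopen comment just above describes the pattern these CI scripts lean on: run a shell command, stream its combined stdout/stderr both to a log file and to the caller's stdout, and bound the run with a timeout (raised from 40 to 90 minutes for the fast test above). A minimal sketch of that tee idea, assuming nothing about the real TeePopen API beyond the comment — the helper name and signature below are illustrative:

    import subprocess
    import sys

    def run_with_tee(cmd: str, log_path: str, timeout: float) -> int:
        # Merge stderr into stdout so both streams reach the same two sinks.
        with open(log_path, "wb") as log, subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        ) as process:
            assert process.stdout is not None
            for line in process.stdout:        # stream line by line...
                log.write(line)                # ...into the log file
                sys.stdout.buffer.write(line)  # ...and onto our own stdout
            try:
                return process.wait(timeout=timeout)
            except subprocess.TimeoutExpired:
                process.kill()
                return process.wait()

    # Usage, mirroring fast_test_check.py above:
    # retcode = run_with_tee(run_cmd, run_log_path, timeout=90 * 60)

Note one simplification: here the timeout only bounds the final wait() after the output stream hits EOF; a production version would arm a timer up front so a command that keeps printing past the deadline is still killed.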
diff --git a/tests/ci/workflow_approve_rerun_lambda/app.py b/tests/ci/workflow_approve_rerun_lambda/app.py index 3db62430d85..5e2331ece3c 100644 --- a/tests/ci/workflow_approve_rerun_lambda/app.py +++ b/tests/ci/workflow_approve_rerun_lambda/app.py @@ -224,8 +224,8 @@ def approve_run(workflow_description: WorkflowDescription, token: str) -> None: def label_manual_approve(pull_request, token): - url = f"{pull_request['url']}/labels" - data = {"labels": "manual approve"} + url = f"{pull_request['issue_url']}/labels" + data = {"labels": ["manual approve"]} _exec_post_with_retry(url, token, data) @@ -376,11 +376,10 @@ def main(event): changed_files = get_changed_files_for_pull_request(pull_request, token) print(f"Totally have {len(changed_files)} changed files in PR:", changed_files) if check_suspicious_changed_files(changed_files): - print( - f"Pull Request {pull_request['number']} has suspicious changes, " - "label it for manuall approve" - ) - label_manual_approve(pull_request, token) + print(f"Pull Request {pull_request['number']} has suspicious changes") + if "manual approve" not in labels: + print("Label the PR as needed for manual approve") + label_manual_approve(pull_request, token) else: print(f"Pull Request {pull_request['number']} has no suspicious changes") approve_run(workflow_description, token) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 82aa3639179..4860ce0fac9 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -589,7 +589,7 @@ class SettingsRandomizer: "filesystem_prefetch_step_bytes": lambda: random.choice( [0, "100Mi"] ), # 0 means 'auto' - "compile_expressions": lambda: random.randint(0, 1), + # "compile_expressions": lambda: random.randint(0, 1), - this setting has a bug: https://github.com/ClickHouse/ClickHouse/issues/51264 "compile_aggregate_expressions": lambda: random.randint(0, 1), "compile_sort_description": lambda: random.randint(0, 1), "merge_tree_coarse_index_granularity": lambda: random.randint(2, 32), diff --git a/tests/config/config.d/database_replicated.xml b/tests/config/config.d/database_replicated.xml index 9a405f85908..2504a7ca526 100644 --- a/tests/config/config.d/database_replicated.xml +++ b/tests/config/config.d/database_replicated.xml @@ -40,7 +40,7 @@ 10000 30000 1000 - 4000 + 2000 5000 information false diff --git a/tests/integration/test_host_regexp_multiple_ptr_records/test.py b/tests/integration/test_host_regexp_multiple_ptr_records/test.py index ba807d3b5d7..82ae0b6c527 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records/test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records/test.py @@ -1,5 +1,7 @@ import pytest +import socket from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check +from time import sleep import os DOCKER_COMPOSE_PATH = get_docker_compose_path() @@ -32,11 +34,27 @@ def started_cluster(): cluster.shutdown() +def check_ptr_record(ip, hostname): + try: + host, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) + if hostname.lower() == host.lower(): + return True + except socket.herror: + pass + return False + + def setup_dns_server(ip): domains_string = "test3.example.com test2.example.com test1.example.com" example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) + # DNS server takes time to reload the configuration. 
+ for try_num in range(10): + if all(check_ptr_record(ip, host) for host in domains_string.split()): + break + sleep(1) + def setup_ch_server(dns_server_ip): ch_server.exec_in_container( diff --git a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py index 62f47579612..d73e8813e79 100644 --- a/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py +++ b/tests/integration/test_host_regexp_multiple_ptr_records_concurrent/test.py @@ -1,4 +1,5 @@ import pytest +import socket from helpers.cluster import ClickHouseCluster, get_docker_compose_path, run_and_check from time import sleep import os @@ -31,11 +32,27 @@ def started_cluster(): cluster.shutdown() +def check_ptr_record(ip, hostname): + try: + host, aliaslist, ipaddrlist = socket.gethostbyaddr(ip) + if hostname.lower() == host.lower(): + return True + except socket.herror: + pass + return False + + def setup_dns_server(ip): domains_string = "test3.example.com test2.example.com test1.example.com" example_file_path = f'{ch_server.env_variables["COREDNS_CONFIG_DIR"]}/example.com' run_and_check(f"echo '{ip} {domains_string}' > {example_file_path}", shell=True) + # DNS server takes time to reload the configuration. + for try_num in range(10): + if all(check_ptr_record(ip, host) for host in domains_string.split()): + break + sleep(1) + def setup_ch_server(dns_server_ip): ch_server.exec_in_container( diff --git a/tests/integration/test_multiple_disks/test.py b/tests/integration/test_multiple_disks/test.py index 54e7f6dd8ee..b5606ee8bc2 100644 --- a/tests/integration/test_multiple_disks/test.py +++ b/tests/integration/test_multiple_disks/test.py @@ -846,7 +846,7 @@ def test_start_stop_moves(start_cluster, name, engine): node1.query("SYSTEM START MOVES {}".format(name)) # wait sometime until background backoff finishes - retry = 30 + retry = 60 i = 0 while not sum(1 for x in used_disks if x == "jbod1") <= 2 and i < retry: time.sleep(1) diff --git a/tests/integration/test_profile_events_s3/test.py b/tests/integration/test_profile_events_s3/test.py index 10c9385f865..a8e037d667f 100644 --- a/tests/integration/test_profile_events_s3/test.py +++ b/tests/integration/test_profile_events_s3/test.py @@ -139,7 +139,7 @@ def test_profile_events(cluster): ) stat1 = get_query_stat(instance, query1) for metric in stat1: - assert stat1[metric] == metrics1[metric] - metrics0[metric] + assert stat1[metric] == metrics1.get(metric, 0) - metrics0.get(metric, 0) assert ( metrics1["WriteBufferFromS3Bytes"] - metrics0["WriteBufferFromS3Bytes"] == size1 ) @@ -163,7 +163,7 @@ def test_profile_events(cluster): stat2 = get_query_stat(instance, query2) for metric in stat2: - assert stat2[metric] == metrics2[metric] - metrics1[metric] + assert stat2[metric] == metrics2.get(metric, 0) - metrics1.get(metric, 0) assert ( metrics2["WriteBufferFromS3Bytes"] - metrics1["WriteBufferFromS3Bytes"] @@ -189,4 +189,4 @@ def test_profile_events(cluster): # With async reads profile events are not updated fully because reads are done in a separate thread. 
# for metric in stat3: # print(metric) - # assert stat3[metric] == metrics3[metric] - metrics2[metric] + # assert stat3[metric] == metrics3.get(metric, 0) - metrics2.get(metric, 0) diff --git a/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml b/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml index ed3b2b595db..24c9eb8891f 100644 --- a/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml +++ b/tests/integration/test_ssl_cert_authentication/configs/ssl_config.xml @@ -25,12 +25,9 @@ true sslv2,sslv3 true - - RejectCertificateHandler - \ No newline at end of file diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index f9d337b6d86..6089466ff5d 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -300,10 +300,10 @@ def test_put_get_with_globs(cluster): azure_query( node, - f"CREATE TABLE test_{i}_{j} ({table_format}) Engine = AzureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSV')", + f"CREATE TABLE test_put_{i}_{j} ({table_format}) Engine = AzureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSV')", ) - query = f"insert into test_{i}_{j} VALUES {values}" + query = f"insert into test_put_{i}_{j} VALUES {values}" azure_query(node, query) azure_query( @@ -332,9 +332,11 @@ def test_azure_glob_scheherazade(cluster): unique_num = random.randint(1, 10000) azure_query( node, - f"CREATE TABLE test_{i}_{unique_num} ({table_format}) Engine = AzureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSV')", + f"CREATE TABLE test_scheherazade_{i}_{unique_num} ({table_format}) Engine = AzureBlobStorage(azure_conf2, container='cont', blob_path='{path}', format='CSV')", + ) + query = ( + f"insert into test_scheherazade_{i}_{unique_num} VALUES {values}" ) - query = f"insert into test_{i}_{unique_num} VALUES {values}" azure_query(node, query) jobs.append( @@ -558,6 +560,7 @@ def test_schema_inference_from_globs_tf(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" + for i in range(10): for j in range(10): path = "{}/{}_{}/{}.csv".format( @@ -582,13 +585,29 @@ def test_partition_by_tf(cluster): table_format = "column1 UInt32, column2 UInt32, column3 UInt32" partition_by = "column3" values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)" - filename = "test_tf_{_partition_id}.csv" + filename = "test_partition_tf_{_partition_id}.csv" azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", ) - assert "1,2,3\n" == get_azure_file_content("test_tf_3.csv") - assert "3,2,1\n" == get_azure_file_content("test_tf_1.csv") - assert "78,43,45\n" == get_azure_file_content("test_tf_45.csv") + assert "1,2,3\n" == get_azure_file_content("test_partition_tf_3.csv") + assert "3,2,1\n" == get_azure_file_content("test_partition_tf_1.csv") + assert "78,43,45\n" == get_azure_file_content("test_partition_tf_45.csv") + + +def test_filter_using_file(cluster): + node = cluster.instances["node"] + table_format = "column1 UInt32, column2 UInt32, column3 UInt32" + partition_by = "column3" + values = "(1, 2, 3), 
(3, 2, 1), (78, 43, 45)" + filename = "test_partition_tf_{_partition_id}.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", + ) + + query = f"select count(*) from azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont', 'test_partition_tf_*.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') WHERE _file='test_partition_tf_3.csv'" + assert azure_query(node, query) == "1\n" diff --git a/tests/integration/test_storage_s3/test.py b/tests/integration/test_storage_s3/test.py index a7293337a9e..6c251d2f84e 100644 --- a/tests/integration/test_storage_s3/test.py +++ b/tests/integration/test_storage_s3/test.py @@ -1776,7 +1776,7 @@ def test_skip_empty_files(started_cluster): assert int(res) == 0 res = instance.query( - f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{11|1|22}}.parquet') settings engine_url_skip_empty_files=1" + f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/skip_empty_files{{11|1|22}}.parquet', auto, 'number UInt64') settings engine_url_skip_empty_files=1" ) assert len(res.strip()) == 0 diff --git a/tests/integration/test_tlsv1_3/test.py b/tests/integration/test_tlsv1_3/test.py index e25216c67df..f5c2be51ed7 100644 --- a/tests/integration/test_tlsv1_3/test.py +++ b/tests/integration/test_tlsv1_3/test.py @@ -4,6 +4,7 @@ from helpers.ssl_context import WrapSSLContextWithSNI import urllib.request, urllib.parse import ssl import os.path +import logging # The test cluster is configured with certificate for that host name, see 'server-ext.cnf'. @@ -11,6 +12,7 @@ import os.path SSL_HOST = "integration-tests.clickhouse.com" HTTPS_PORT = 8443 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +MAX_RETRY = 5 cluster = ClickHouseCluster(__file__) instance = cluster.add_instance( @@ -88,10 +90,18 @@ def test_https_wrong_cert(): execute_query_https("SELECT currentUser()", user="john", cert_name="client2") assert "HTTP Error 403" in str(err.value) + count = 0 # Wrong certificate: self-signed certificate. - with pytest.raises(Exception) as err: - execute_query_https("SELECT currentUser()", user="john", cert_name="wrong") - assert "unknown ca" in str(err.value) + while count <= MAX_RETRY: + with pytest.raises(Exception) as err: + execute_query_https("SELECT currentUser()", user="john", cert_name="wrong") + err_str = str(err.value) + if count < MAX_RETRY and "Broken pipe" in err_str: + count = count + 1 + logging.warning(f"Failed attempt with wrong cert, err: {err_str}") + continue + assert "unknown ca" in err_str + break # No certificate. with pytest.raises(Exception) as err: @@ -181,24 +191,45 @@ def test_https_non_ssl_auth(): == "jane\n" ) + count = 0 # However if we send a certificate it must not be wrong. 
- with pytest.raises(Exception) as err: - execute_query_https( - "SELECT currentUser()", - user="peter", - enable_ssl_auth=False, - cert_name="wrong", - ) - assert "unknown ca" in str(err.value) - with pytest.raises(Exception) as err: - execute_query_https( - "SELECT currentUser()", - user="jane", - enable_ssl_auth=False, - password="qwe123", - cert_name="wrong", - ) - assert "unknown ca" in str(err.value) + while count <= MAX_RETRY: + with pytest.raises(Exception) as err: + execute_query_https( + "SELECT currentUser()", + user="peter", + enable_ssl_auth=False, + cert_name="wrong", + ) + err_str = str(err.value) + if count < MAX_RETRY and "Broken pipe" in err_str: + count = count + 1 + logging.warning( + f"Failed attempt with wrong cert, user: peter, err: {err_str}" + ) + continue + assert "unknown ca" in err_str + break + + count = 0 + while count <= MAX_RETRY: + with pytest.raises(Exception) as err: + execute_query_https( + "SELECT currentUser()", + user="jane", + enable_ssl_auth=False, + password="qwe123", + cert_name="wrong", + ) + err_str = str(err.value) + if count < MAX_RETRY and "Broken pipe" in err_str: + count = count + 1 + logging.warning( + f"Failed attempt with wrong cert, user: jane, err: {err_str}" + ) + continue + assert "unknown ca" in err_str + break def test_create_user(): diff --git a/tests/queries/0_stateless/00809_add_days_segfault.sql b/tests/queries/0_stateless/00809_add_days_segfault.sql index 6791439708a..d2d91dd2711 100644 --- a/tests/queries/0_stateless/00809_add_days_segfault.sql +++ b/tests/queries/0_stateless/00809_add_days_segfault.sql @@ -9,4 +9,4 @@ SET send_logs_level = 'fatal'; SELECT ignore(addDays((CAST((96.338) AS DateTime)), -3)); SELECT ignore(subtractDays((CAST((-5263074.47) AS DateTime)), -737895)); SELECT quantileDeterministic([], identity(( SELECT subtractDays((CAST((566450.398706) AS DateTime)), 54) ) )), '\0', []; -- { serverError 43 } -SELECT sequenceCount((CAST((( SELECT NULL ) AS rg, ( SELECT ( SELECT [], ' 5; +create table ttl_01280_1 (a Int, b Int, x Int, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second delete where x % 10 == 0 and y > 5 SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_1 values (1, 1, 0, 4, now() + 10); insert into ttl_01280_1 values (1, 1, 10, 6, now()); insert into ttl_01280_1 values (1, 2, 3, 7, now()); @@ -34,7 +36,7 @@ $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_2" echo "ttl_01280_2" $CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d); +create table ttl_01280_2 (a Int, b Int, x Array(Int32), y Double, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set x = minForEach(x), y = sum(y), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_2 values (1, 1, array(0, 2, 3), 4, now() + 10); insert into ttl_01280_2 values (1, 1, array(5, 4, 3), 6, now()); insert into ttl_01280_2 values (1, 1, array(5, 5, 1), 7, now()); @@ -53,7 +55,7 @@ $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_3" echo "ttl_01280_3" $CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), x = argMax(x, d), y = argMax(y, d), d = max(d); +create table ttl_01280_3 (a Int, b Int, x Int64, y Int, d DateTime) engine = 
MergeTree order by (a, b) ttl d + interval 1 second group by a set b = min(b), x = argMax(x, d), y = argMax(y, d), d = max(d) SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_3 values (1, 1, 0, 4, now() + 10); insert into ttl_01280_3 values (1, 1, 10, 6, now() + 1); insert into ttl_01280_3 values (1, 2, 3, 7, now()); @@ -72,7 +74,7 @@ $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_4" echo "ttl_01280_4" $CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y); +create table ttl_01280_4 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), -(a + b)) ttl d + interval 1 second group by toDate(d) set x = sum(x), y = max(y) SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_4 values (1, 1, 0, 4, now() + 10); insert into ttl_01280_4 values (10, 2, 3, 3, now()); insert into ttl_01280_4 values (2, 10, 1, 7, now()); @@ -86,7 +88,7 @@ $CLICKHOUSE_CLIENT --query "select x, y from ttl_01280_4 ORDER BY a, b, x, y" $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_5" echo "ttl_01280_5" -$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x), b = argMax(b, -b); +$CLICKHOUSE_CLIENT -n --query "create table ttl_01280_5 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a set x = sum(x), b = argMax(b, -b) SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_5 values (1, 2, 3, 5, now()); insert into ttl_01280_5 values (2, 10, 1, 5, now()); insert into ttl_01280_5 values (2, 3, 5, 5, now()); @@ -100,7 +102,7 @@ $CLICKHOUSE_CLIENT --query "drop table if exists ttl_01280_6" echo "ttl_01280_6" $CLICKHOUSE_CLIENT -n --query " -create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a; +create table ttl_01280_6 (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (toDate(d), a, -b) ttl d + interval 1 second group by toDate(d), a SETTINGS max_parts_to_merge_at_once = 1; insert into ttl_01280_6 values (1, 2, 3, 5, now()); insert into ttl_01280_6 values (2, 10, 3, 5, now()); insert into ttl_01280_6 values (2, 3, 3, 5, now()); diff --git a/tests/queries/0_stateless/01443_merge_truncate_long.reference b/tests/queries/0_stateless/01443_merge_truncate_long.reference index e69de29bb2d..190423f88f8 100644 --- a/tests/queries/0_stateless/01443_merge_truncate_long.reference +++ b/tests/queries/0_stateless/01443_merge_truncate_long.reference @@ -0,0 +1,100 @@ +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 +98 +99 +100 diff --git a/tests/queries/0_stateless/01443_merge_truncate_long.sh b/tests/queries/0_stateless/01443_merge_truncate_long.sh index 00abc48493a..ce867d32c1a 100755 --- a/tests/queries/0_stateless/01443_merge_truncate_long.sh +++ 
b/tests/queries/0_stateless/01443_merge_truncate_long.sh @@ -3,23 +3,40 @@ set -e +CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=none + CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -CLICKHOUSE_CLIENT=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=none/g') - ${CLICKHOUSE_CLIENT} --query="DROP TABLE IF EXISTS t" -${CLICKHOUSE_CLIENT} --query="CREATE TABLE t (x Int8) ENGINE = MergeTree ORDER BY tuple()" +${CLICKHOUSE_CLIENT} --query="CREATE TABLE t (x Int8) ENGINE = MergeTree ORDER BY ()" -for _ in {1..70}; do - ${CLICKHOUSE_CLIENT} --query="INSERT INTO t VALUES (0)" - ${CLICKHOUSE_CLIENT} --query="INSERT INTO t VALUES (0)" - ${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE t FINAL" 2>/dev/null & - ${CLICKHOUSE_CLIENT} --query="TRUNCATE TABLE t" - ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM t HAVING count() > 0" -done +function thread() +{ + trap 'BREAK=1' 2 + + while [[ -z "${BREAK}" ]] + do + ${CLICKHOUSE_CLIENT} --query="OPTIMIZE TABLE t FINAL;" 2>&1 | tr -d '\n' | rg -v 'Cancelled merging parts' ||: + done +} + +thread & +pid=$! + +for i in {1..100}; do + echo " + INSERT INTO t VALUES (0); + INSERT INTO t VALUES (0); + TRUNCATE TABLE t; + SELECT count() FROM t HAVING count() > 0; + SELECT ${i}; + " +done | ${CLICKHOUSE_CLIENT} --multiquery + +kill -2 "$pid" wait $CLICKHOUSE_CLIENT -q "DROP TABLE t" diff --git a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql index ca7d0f3c950..4623c456475 100644 --- a/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql +++ b/tests/queries/0_stateless/01605_adaptive_granularity_block_borders.sql @@ -5,9 +5,9 @@ SET allow_prefetched_read_pool_for_remote_filesystem=0; DROP TABLE IF EXISTS adaptive_table; ---- If granularity of consequent blocks differs a lot, then adaptive ---- granularity will adjust amout of marks correctly. Data for test empirically ---- derived, it's quite hard to get good parameters. +-- If granularity of consequent blocks differs a lot, then adaptive +-- granularity will adjust the amount of marks correctly. +-- Data for test was empirically derived, it's quite hard to get good parameters. CREATE TABLE adaptive_table( key UInt64, @@ -32,6 +32,7 @@ SET enable_filesystem_cache = 0; -- If we have computed granularity incorrectly than we will exceed this limit. SET max_memory_usage='30M'; +SET max_threads = 1; SELECT max(length(value)) FROM adaptive_table; diff --git a/tests/queries/0_stateless/01651_bugs_from_15889.reference b/tests/queries/0_stateless/01651_bugs_from_15889.reference index 8b137891791..77ac542d4fb 100644 --- a/tests/queries/0_stateless/01651_bugs_from_15889.reference +++ b/tests/queries/0_stateless/01651_bugs_from_15889.reference @@ -1 +1,2 @@ +0 diff --git a/tests/queries/0_stateless/01651_bugs_from_15889.sql b/tests/queries/0_stateless/01651_bugs_from_15889.sql index 4717a8dcc0d..6a8c6d35911 100644 --- a/tests/queries/0_stateless/01651_bugs_from_15889.sql +++ b/tests/queries/0_stateless/01651_bugs_from_15889.sql @@ -8,10 +8,9 @@ INSERT INTO xp SELECT '2020-01-01', number, '' FROM numbers(100000); CREATE TABLE xp_d AS xp ENGINE = Distributed(test_shard_localhost, currentDatabase(), xp); --- FIXME: this query spontaneously returns either 8 or 20 error code. Looks like it's potentially flaky. 
--- SELECT count(7 = (SELECT number FROM numbers(0) ORDER BY number ASC NULLS FIRST LIMIT 7)) FROM xp_d PREWHERE toYYYYMM(A) GLOBAL IN (SELECT NULL = (SELECT number FROM numbers(1) ORDER BY number DESC NULLS LAST LIMIT 1), toYYYYMM(min(A)) FROM xp_d) WHERE B > NULL; -- { serverError 8 } +SELECT count(7 = (SELECT number FROM numbers(0) ORDER BY number ASC NULLS FIRST LIMIT 7)) FROM xp_d PREWHERE toYYYYMM(A) GLOBAL IN (SELECT NULL = (SELECT number FROM numbers(1) ORDER BY number DESC NULLS LAST LIMIT 1), toYYYYMM(min(A)) FROM xp_d) WHERE B > NULL; -- { serverError 8 } -SELECT count() FROM xp_d WHERE A GLOBAL IN (SELECT NULL); -- { serverError 53 } +SELECT count() FROM xp_d WHERE A GLOBAL IN (SELECT NULL); DROP TABLE IF EXISTS xp; DROP TABLE IF EXISTS xp_d; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index e6628813dbd..8d3954484dd 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -10,7 +10,7 @@ Keys: y bitAnd(z, 3) - Condition: and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1]))) + Condition: and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1])) Parts: 3/4 Granules: 10/11 PrimaryKey @@ -46,7 +46,7 @@ { "Type": "Partition", "Keys": ["y", "bitAnd(z, 3)"], - "Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1])))", + "Condition": "and((y in [1, +Inf)), (bitAnd(z, 3) not in [1, 1]))", "Initial Parts": 4, "Selected Parts": 3, "Initial Granules": 11, diff --git a/tests/queries/0_stateless/01945_system_warnings.sh b/tests/queries/0_stateless/01945_system_warnings.sh index bf11cee2911..c9bd0fd4b8a 100755 --- a/tests/queries/0_stateless/01945_system_warnings.sh +++ b/tests/queries/0_stateless/01945_system_warnings.sh @@ -21,5 +21,4 @@ ${CLICKHOUSE_CLIENT} --multiple_joins_rewriter_version=42 -q "SELECT message FRO ${CLICKHOUSE_CLIENT} -q "SELECT count() = countDistinct(message) FROM system.warnings" # Avoid too many warnings, especially in CI -${CLICKHOUSE_CLIENT} -q "SELECT count() < 5 FROM system.warnings" - +${CLICKHOUSE_CLIENT} -q "SELECT count() < 10 FROM system.warnings" diff --git a/tests/queries/0_stateless/02050_clickhouse_client_local_exception.sh b/tests/queries/0_stateless/02050_clickhouse_client_local_exception.sh index eac085a914e..a2bb29377ac 100755 --- a/tests/queries/0_stateless/02050_clickhouse_client_local_exception.sh +++ b/tests/queries/0_stateless/02050_clickhouse_client_local_exception.sh @@ -10,4 +10,4 @@ function cleanup() rm "${CLICKHOUSE_TMP}/test_exception" } trap cleanup EXIT -$CLICKHOUSE_LOCAL --query="SELECT 1 INTO OUTFILE '${CLICKHOUSE_TMP}/test_exception' FORMAT Native" 2>&1 | grep -q "Code: 76. DB::ErrnoException:" && echo 'OK' || echo 'FAIL' ||: +$CLICKHOUSE_LOCAL --query="SELECT 1 INTO OUTFILE '${CLICKHOUSE_TMP}/test_exception' FORMAT Native" 2>&1 | grep -q "Code: 504. DB::Exception:" && echo 'OK' || echo 'FAIL' ||: diff --git a/tests/queries/0_stateless/02125_many_mutations.sh b/tests/queries/0_stateless/02125_many_mutations.sh index c3108df5ae3..b42d5bb15d3 100755 --- a/tests/queries/0_stateless/02125_many_mutations.sh +++ b/tests/queries/0_stateless/02125_many_mutations.sh @@ -5,7 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0" +# "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL + +$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" $CLICKHOUSE_CLIENT -q "insert into many_mutations values (0, 0), (1, 1)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" @@ -44,6 +46,6 @@ wait $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "system start merges many_mutations" -$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" +$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" --optimize_throw_if_noop 1 $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "select x, y from many_mutations order by x" diff --git a/tests/queries/0_stateless/02125_many_mutations_2.sh b/tests/queries/0_stateless/02125_many_mutations_2.sh index 52866a54974..e5e3070a944 100755 --- a/tests/queries/0_stateless/02125_many_mutations_2.sh +++ b/tests/queries/0_stateless/02125_many_mutations_2.sh @@ -5,7 +5,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0" +# "max_parts_to_merge_at_once = 1" prevents merges to start in background before our own OPTIMIZE FINAL + +$CLICKHOUSE_CLIENT -q "create table many_mutations (x UInt32, y UInt32) engine = MergeTree order by x settings number_of_mutations_to_delay = 0, number_of_mutations_to_throw = 0, max_parts_to_merge_at_once = 1" $CLICKHOUSE_CLIENT -q "insert into many_mutations select number, number + 1 from numbers(2000)" $CLICKHOUSE_CLIENT -q "system stop merges many_mutations" @@ -44,7 +46,7 @@ wait $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "system start merges many_mutations" -$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" +$CLICKHOUSE_CLIENT -q "optimize table many_mutations final" --optimize_throw_if_noop 1 $CLICKHOUSE_CLIENT -q "system flush logs" $CLICKHOUSE_CLIENT -q "select count() from system.mutations where database = currentDatabase() and table = 'many_mutations' and not is_done" $CLICKHOUSE_CLIENT -q "select count() from many_mutations" diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.reference b/tests/queries/0_stateless/02234_cast_to_ip_address.reference index 9023b36a9bf..fa9c6bd0f94 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.reference +++ b/tests/queries/0_stateless/02234_cast_to_ip_address.reference @@ -11,6 +11,10 @@ IPv4 functions 127.0.0.1 127.0.0.1 -- +1.2.3.4 +1.2.3.4 +0.0.0.0 +-- 127.0.0.1 -- 0 diff --git a/tests/queries/0_stateless/02234_cast_to_ip_address.sql b/tests/queries/0_stateless/02234_cast_to_ip_address.sql index 6c65fe86cc9..28f1afff57f 100644 --- a/tests/queries/0_stateless/02234_cast_to_ip_address.sql 
+++ b/tests/queries/0_stateless/02234_cast_to_ip_address.sql @@ -20,6 +20,13 @@ SELECT toIPv4OrNull('127.0.0.1'); SELECT '--'; +SELECT toIPv4(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4(toIPv6('::afff:1.2.3.4')); --{serverError CANNOT_CONVERT_TYPE} +SELECT toIPv4OrDefault(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4OrDefault(toIPv6('::afff:1.2.3.4')); + +SELECT '--'; + SELECT cast('test' , 'IPv4'); --{serverError CANNOT_PARSE_IPV4} SELECT cast('127.0.0.1' , 'IPv4'); diff --git a/tests/queries/0_stateless/02346_into_outfile_and_stdout.sh b/tests/queries/0_stateless/02346_into_outfile_and_stdout.sh index 021dc9125d4..38145ab3e51 100755 --- a/tests/queries/0_stateless/02346_into_outfile_and_stdout.sh +++ b/tests/queries/0_stateless/02346_into_outfile_and_stdout.sh @@ -66,7 +66,7 @@ performBadQuery "bad_query_incorrect_usage" "SELECT 1, 2, 3 INTO OUTFILE AND STD performBadQuery "bad_query_no_into_outfile" "SELECT 1, 2, 3 AND STDOUT'" "SYNTAX_ERROR" -performFileExists "bad_query_file_exists" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_and_stdout_bad_query_file_exists.out' AND STDOUT" "File exists. (CANNOT_OPEN_FILE)" +performFileExists "bad_query_file_exists" "SELECT 1, 2, 3 INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_and_stdout_bad_query_file_exists.out' AND STDOUT" "File ${CLICKHOUSE_TMP}/test_into_outfile_and_stdout_bad_query_file_exists.out exists, consider using APPEND or TRUNCATE." performCompression "compression" "SELECT * FROM (SELECT 'Hello, World! From clickhouse.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_into_outfile_and_stdout_compression.gz' AND STDOUT COMPRESSION 'GZ' FORMAT TabSeparated" diff --git a/tests/queries/0_stateless/02458_relax_too_many_parts.sql b/tests/queries/0_stateless/02458_relax_too_many_parts.sql index a1f8e86fce5..01f9edce675 100644 --- a/tests/queries/0_stateless/02458_relax_too_many_parts.sql +++ b/tests/queries/0_stateless/02458_relax_too_many_parts.sql @@ -1,5 +1,5 @@ DROP TABLE IF EXISTS test; -CREATE TABLE test (x UInt64, s String) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_throw_insert = 3; +CREATE TABLE test (x UInt64, s String) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_throw_insert = 3, max_parts_to_merge_at_once = 1; -- The "too many parts" threshold works: SET max_block_size = 1, min_insert_block_size_rows = 1, min_insert_block_size_bytes = 1; @@ -14,7 +14,7 @@ ALTER TABLE test MODIFY SETTING max_avg_part_size_for_too_many_parts = '1M'; -- It works in the same way if parts are small: SYSTEM START MERGES test; -OPTIMIZE TABLE test FINAL; +OPTIMIZE TABLE test FINAL SETTINGS optimize_throw_if_noop=1; SYSTEM STOP MERGES test; INSERT INTO test VALUES (5, 'a'); @@ -23,7 +23,7 @@ INSERT INTO test VALUES (7, 'a'); -- { serverError TOO_MANY_PARTS } -- But it allows having more parts if their average size is large: SYSTEM START MERGES test; -OPTIMIZE TABLE test FINAL; +OPTIMIZE TABLE test FINAL SETTINGS optimize_throw_if_noop=1; SYSTEM STOP MERGES test; SET max_block_size = 65000, min_insert_block_size_rows = 65000, min_insert_block_size_bytes = '1M'; diff --git a/tests/queries/0_stateless/02479_mysql_connect_to_self.sql b/tests/queries/0_stateless/02479_mysql_connect_to_self.sql index ff46ebf1ed2..cf2220073d3 100644 --- a/tests/queries/0_stateless/02479_mysql_connect_to_self.sql +++ b/tests/queries/0_stateless/02479_mysql_connect_to_self.sql @@ -2,34 +2,36 @@ SET send_logs_level = 'fatal'; -- failed connection tries are ok, if it succeeded after retry. 
+DROP TABLE IF EXISTS foo; + CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; INSERT INTO foo VALUES (1, 'one', -1, 'een'), (2, 'two', -2, 'twee'), (3, 'three', -3, 'drie'), (4, 'four', -4, 'vier'), (5, 'five', -5, 'vijf'); SET allow_experimental_analyzer = 1; SELECT '---'; -SELECT * FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') ORDER BY key; +SELECT * FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; SELECT '---'; -SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', ''); +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); SELECT '---'; -SELECT 1 FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', ''); +SELECT 1 FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); SELECT '---'; -SELECT key FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') ORDER BY key; +SELECT key FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; SELECT '---'; -SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') ORDER BY a; +SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY a; SELECT '---'; -SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') ORDER BY c; +SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY c; SELECT '---'; -SELECT b FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') WHERE c != 'twee' ORDER BY b; +SELECT b FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee' ORDER BY b; SELECT '---'; -SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '') WHERE c != 'twee'; +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee'; EXPLAIN QUERY TREE dump_ast = 1 SELECT * FROM mysql( @@ -38,5 +40,7 @@ SELECT * FROM mysql( ); SELECT '---'; -SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 1); +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 1, connect_timeout = 100, connection_wait_timeout = 100); SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 0); -- { serverError BAD_ARGUMENTS } + +DROP TABLE foo; diff --git a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh index 47245eeb940..55e6ac2f758 100755 --- a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh +++ b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -# Tags: no-ubsan, no-fasttest +# Tags: no-ubsan, no-fasttest, no-tsan +# It is too slow under TSan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # 
shellcheck source=../shell_config.sh @@ -13,4 +14,4 @@ ${CLICKHOUSE_CLIENT} --query="CREATE TABLE parquet_load (list Array(Int64), json cat "$DATA_FILE" | ${CLICKHOUSE_CLIENT} -q "INSERT INTO parquet_load FORMAT Parquet" ${CLICKHOUSE_CLIENT} --query="SELECT * FROM parquet_load" | md5sum ${CLICKHOUSE_CLIENT} --query="SELECT count() FROM parquet_load" -${CLICKHOUSE_CLIENT} --query="drop table parquet_load" \ No newline at end of file +${CLICKHOUSE_CLIENT} --query="drop table parquet_load" diff --git a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.reference b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.reference new file mode 100644 index 00000000000..389e2621455 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.reference @@ -0,0 +1,2 @@ +2 +0 diff --git a/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh new file mode 100755 index 00000000000..394367919e9 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_nested_query_bug.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Tags: no-parallel, no-fasttest +# Tag no-parallel: Messes with internal cache +# no-fasttest: Produces wrong results in fasttest, unclear why, didn't reproduce locally. + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +# Start with empty query cache (QC). +${CLICKHOUSE_CLIENT} --query "SYSTEM DROP QUERY CACHE" + +${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS tab" +${CLICKHOUSE_CLIENT} --query "CREATE TABLE tab (a UInt64) ENGINE=MergeTree() ORDER BY a" +${CLICKHOUSE_CLIENT} --query "INSERT INTO tab VALUES (1) (2) (3)" +${CLICKHOUSE_CLIENT} --query "INSERT INTO tab VALUES (3) (4) (5)" + +# Verify that the first query does two aggregations and the second query zero aggregations. Since query cache is currently not integrated +# with EXPLAIN PLAN, we need to check the logs. +${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab SETTINGS use_query_cache=1, max_threads=1, allow_experimental_analyzer=0" 2>&1 | grep "Aggregated. " | wc -l +${CLICKHOUSE_CLIENT} --send_logs_level=trace --query "SELECT count(a) / (SELECT sum(a) FROM tab) FROM tab SETTINGS use_query_cache=1, max_threads=1, allow_experimental_analyzer=0" 2>&1 | grep "Aggregated. 
" | wc -l + +${CLICKHOUSE_CLIENT} --query "SYSTEM DROP QUERY CACHE" diff --git a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference index a1a653361ee..88f23334d31 100644 --- a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference +++ b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference @@ -405,6 +405,16 @@ QUERY id: 0 TABLE id: 7, table_name: system.numbers LIMIT CONSTANT id: 17, constant_value: UInt64_10, constant_value_type: UInt64 +other +other +other +other +other +other +other +other +other +other SELECT transform(number, [NULL], _CAST([\'google\', \'censor.net\', \'yahoo\'], \'Array(Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4))\'), _CAST(\'other\', \'Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4)\')) FROM ( diff --git a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql index 492d42cb6bc..c3db61d1fb2 100644 --- a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql +++ b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql @@ -33,7 +33,7 @@ SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; -SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); -- { serverError 36 } +SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); EXPLAIN SYNTAX SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); diff --git a/tests/queries/0_stateless/02497_trace_events_stress_long.sh b/tests/queries/0_stateless/02497_trace_events_stress_long.sh index 7dc72807c5a..3ec729079b8 100755 --- a/tests/queries/0_stateless/02497_trace_events_stress_long.sh +++ b/tests/queries/0_stateless/02497_trace_events_stress_long.sh @@ -43,5 +43,21 @@ timeout $TIMEOUT bash -c thread2 >/dev/null & wait -$CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%' SYNC" >/dev/null -$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%'" +for _ in {1..10} +do + $CLICKHOUSE_CLIENT -q "KILL QUERY WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%' SYNC" >/dev/null + + # After this moment, the server can still run another query. + # For example, the 'timeout' command killed all threads of thread1, + # and the 'timeout' itself has finished, and we have successfully 'wait'-ed for it, + # but just before that, one of the threads successfully sent a query to the server, + # but the server didn't start to run this query yet, + # and even when the KILL QUERY was run, the query from the thread didn't start, + # but only started after the KILL QUERY has been already processed. + + # That's why we have to run this in a loop. 
+ + $CLICKHOUSE_CLIENT -q "SELECT count() FROM system.processes WHERE query_id LIKE '02497_$CLICKHOUSE_DATABASE%'" | rg '^0$' && break + + sleep 1 +done diff --git a/tests/queries/0_stateless/02552_siphash128_reference.reference b/tests/queries/0_stateless/02552_siphash128_reference.reference index 452e9910660..d00491fd7e5 100644 --- a/tests/queries/0_stateless/02552_siphash128_reference.reference +++ b/tests/queries/0_stateless/02552_siphash128_reference.reference @@ -1,130 +1 @@ -A3817F04BA25A8E66DF67214C7550293 -DA87C1D86B99AF44347659119B22FC45 -8177228DA4A45DC7FCA38BDEF60AFFE4 -9C70B60C5267A94E5F33B6B02985ED51 -F88164C12D9C8FAF7D0F6E7C7BCD5579 -1368875980776F8854527A07690E9627 -14EECA338B208613485EA0308FD7A15E -A1F1EBBED8DBC153C0B84AA61FF08239 -3B62A9BA6258F5610F83E264F31497B4 -264499060AD9BAABC47F8B02BB6D71ED -00110DC378146956C95447D3F3D0FBBA -0151C568386B6677A2B4DC6F81E5DC18 -D626B266905EF35882634DF68532C125 -9869E247E9C08B10D029934FC4B952F7 -31FCEFAC66D7DE9C7EC7485FE4494902 -5493E99933B0A8117E08EC0F97CFC3D9 -6EE2A4CA67B054BBFD3315BF85230577 -473D06E8738DB89854C066C47AE47740 -A426E5E423BF4885294DA481FEAEF723 -78017731CF65FAB074D5208952512EB1 -9E25FC833F2290733E9344A5E83839EB -568E495ABE525A218A2214CD3E071D12 -4A29B54552D16B9A469C10528EFF0AAE -C9D184DDD5A9F5E0CF8CE29A9ABF691C -2DB479AE78BD50D8882A8A178A6132AD -8ECE5F042D5E447B5051B9EACB8D8F6F -9C0B53B4B3C307E87EAEE08678141F66 -ABF248AF69A6EAE4BFD3EB2F129EEB94 -0664DA1668574B88B935F3027358AEF4 -AA4B9DC4BF337DE90CD4FD3C467C6AB7 -EA5C7F471FAF6BDE2B1AD7D4686D2287 -2939B0183223FAFC1723DE4F52C43D35 -7C3956CA5EEAFC3E363E9D556546EB68 -77C6077146F01C32B6B69D5F4EA9FFCF -37A6986CB8847EDF0925F0F1309B54DE -A705F0E69DA9A8F907241A2E923C8CC8 -3DC47D1F29C448461E9E76ED904F6711 -0D62BF01E6FC0E1A0D3C4751C5D3692B -8C03468BCA7C669EE4FD5E084BBEE7B5 -528A5BB93BAF2C9C4473CCE5D0D22BD9 -DF6A301E95C95DAD97AE0CC8C6913BD8 -801189902C857F39E73591285E70B6DB -E617346AC9C231BB3650AE34CCCA0C5B -27D93437EFB721AA401821DCEC5ADF89 -89237D9DED9C5E78D8B1C9B166CC7342 -4A6D8091BF5E7D651189FA94A250B14C -0E33F96055E7AE893FFC0E3DCF492902 -E61C432B720B19D18EC8D84BDC63151B -F7E5AEF549F782CF379055A608269B16 -438D030FD0B7A54FA837F2AD201A6403 -A590D3EE4FBF04E3247E0D27F286423F -5FE2C1A172FE93C4B15CD37CAEF9F538 -2C97325CBD06B36EB2133DD08B3A017C -92C814227A6BCA949FF0659F002AD39E -DCE850110BD8328CFBD50841D6911D87 -67F14984C7DA791248E32BB5922583DA -1938F2CF72D54EE97E94166FA91D2A36 -74481E9646ED49FE0F6224301604698E -57FCA5DE98A9D6D8006438D0583D8A1D -9FECDE1CEFDC1CBED4763674D9575359 -E3040C00EB28F15366CA73CBD872E740 -7697009A6A831DFECCA91C5993670F7A -5853542321F567A005D547A4F04759BD -5150D1772F50834A503E069A973FBD7C -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 1 diff --git a/tests/queries/0_stateless/02552_siphash128_reference.sql b/tests/queries/0_stateless/02552_siphash128_reference.sql index c238e51b690..200954c3b57 100644 --- a/tests/queries/0_stateless/02552_siphash128_reference.sql +++ b/tests/queries/0_stateless/02552_siphash128_reference.sql @@ -1,207 +1 @@ --- Test Vectors from the SipHash reference C implementation: --- Written by --- Jean-Philippe Aumasson --- Daniel J. 
Bernstein --- Released under CC0 --- https://github.com/veorq/SipHash/blob/eee7d0d84dc7731df2359b243aa5e75d85f6eaef/vectors.h#L645 - -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - '')); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))); -select 
hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37))); -select 
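-- Note: the removed vectors cover every message length from 0 to 63 bytes, i.e. every
-- possible tail length of SipHash's 8-byte block cycle. A minimal sketch of generating the
-- same 64 checks in one query instead of 64 unrolled statements (assuming the standard
-- char/range/arrayMap/arrayStringConcat built-ins):
--   SELECT hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), m))
--   FROM (SELECT arrayStringConcat(arrayMap(x -> char(x), range(number))) AS m FROM numbers(64));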
hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61))); -select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), - char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62))); - --- CH tests -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0)) == sipHash128Reference(char(0)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1)) == sipHash128Reference(char(0, 1)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2)) == sipHash128Reference(char(0, 1, 2)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3)) == sipHash128Reference(char(0, 1, 2, 3)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4)) == sipHash128Reference(char(0, 1, 2, 3, 4)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)); -select 
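-- Note: the "CH tests" below pin down that the zero key is the identity case, i.e.
-- sipHash128ReferenceKeyed((0, 0), m) = sipHash128Reference(m) for message lengths 1..64.
-- A compact equivalent of the 64 unrolled comparisons, as a sketch under the same
-- assumptions as above:
--   SELECT min(sipHash128ReferenceKeyed((toUInt64(0), toUInt64(0)), m) = sipHash128Reference(m))
--   FROM (SELECT arrayStringConcat(arrayMap(x -> char(x), range(number + 1))) AS m FROM numbers(64));
--   -- expected result: 1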
sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)) == sipHash128Reference(char(0, 
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)); -select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62));
-select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63));
-
-select sipHash128ReferenceKeyed((0, 0), '1'); -- { serverError 48 }
-select sipHash128ReferenceKeyed(toUInt64(0), '1'); -- { serverError 48 }
-
 SELECT hex(sipHash128Reference()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128()) = '1CE422FEE7BD8DE20000000000000000';
-SELECT hex(sipHash128ReferenceKeyed()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128Keyed()) = '1CE422FEE7BD8DE20000000000000000';
diff --git a/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.reference.j2 b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.reference.j2
new file mode 100644
index 00000000000..03b0e065151
--- /dev/null
+++ b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.reference.j2
@@ -0,0 +1,220 @@
+----- hash / State / ip4 -----
+Row 1:
+──────
+minState: 12535288824949910799
+maxState: 15790547582231549775
+first_valueState: 12535288824949910799
+last_valueState: 15790547582231549775
+topKState: 4906125994014190470
+groupArrayState: 9875990674330641453
+groupUniqArrayState: 15788623094139372318
+uniqState: 13857072740564896081
+uniqExactState: 2764760571052675772
+uniqCombinedState: 927481020821507998
+uniqCombined64State: 11588178464185397904
+uniqHLL12State: 592773541433144605
+uniqThetaState: 12573391720108828030
+----- hash / State / ip6 -----
+Row 1:
+──────
+minState: 9428555662807296659
+maxState: 18253481702148601156
+first_valueState: 9428555662807296659
+last_valueState: 18253481702148601156
+topKState: 4649040466816645853
+groupArrayState: 15334593495826890008
+groupUniqArrayState: 18179202420787216155
+uniqState: 1113473461736161202
+uniqExactState: 17291302630176581193
+uniqCombinedState: 7689865507370303115
+uniqCombined64State: 7689865507370303115
+uniqHLL12State: 12630756003012135681
+uniqThetaState: 11768246996604802350
+----- finalizeAggregation / State / ip4 -----
+Row 1:
+──────
+min: 59.154.201.255
+max: 59.154.202.47
+first_value: 59.154.201.255
+last_value: 59.154.202.47
+topK: ['59.154.201.255','59.154.202.0','59.154.202.1','59.154.202.2','59.154.202.3','59.154.202.4','59.154.202.5','59.154.202.6','59.154.202.7','59.154.202.8']
+groupArray:
['59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.15
4.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.15
4.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.20
2.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.2
3','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.35',
'59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.44','59.154.202.44','59.154.202.44','59.154.202.44','59.154.202.45','59.154.202.45','59.154.202.45','59.154.202.46','59.154.202.46','59.154.202.47'] +groupUniqArray: ['59.154.202.28','59.154.202.45','59.154.202.35','59.154.202.2','59.154.202.42','59.154.202.1','59.154.202.4','59.154.202.15','59.154.202.22','59.154.202.20','59.154.202.12','59.154.202.3','59.154.202.40','59.154.202.43','59.154.202.26','59.154.202.37','59.154.202.7','59.154.202.36','59.154.202.32','59.154.202.47','59.154.202.17','59.154.202.11','59.154.201.255','59.154.202.0','59.154.202.14','59.154.202.25','59.154.202.6','59.154.202.30','59.154.202.16','59.154.202.21','59.154.202.23','59.154.202.38','59.154.202.44','59.154.202.39','59.154.202.41','59.154.202.27','59.154.202.33','59.154.202.19','59.154.202.5','59.154.202.9','59.154.202.18','59.154.202.24','59.154.202.34','59.154.202.46','59.154.202.8','59.154.202.29','59.154.202.10','59.154.202.13','59.154.202.31'] +uniq: 49 +uniqExact: 49 +uniqCombined: 49 +uniqCombined64: 49 +uniqHLL12: 48 +uniqTheta: 49 +----- finalizeAggregation / State / ip6 ----- +Row 1: +────── +min: 8c:333c::8c:333c:0:0 +max: ff8b:333c::ff8b:333c:0:0 +first_value: 8c:333c::8c:333c:0:0 +last_value: ff8b:333c::ff8b:333c:0:0 +topK: ['dd8b:333c::dd8b:333c:0:0','de8b:333c::de8b:333c:0:0','df8b:333c::df8b:333c:0:0','e08b:333c::e08b:333c:0:0','e18b:333c::e18b:333c:0:0','e28b:333c::e28b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e58b:333c::e58b:333c:0:0','e68b:333c::e68b:333c:0:0'] +groupArray: 
['8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','b8c:333c::b8c:333c:0:0','b8c:333c::b8c:333c:0:0','b8c:333c::b8c:333c:0:0','c8c:333c::c8c:333c:0:0','c8c:333c::c8c:333c:0:0','d8c:333c::d8c:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::
dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e0
8b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c
:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::
e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea
8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c
:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::
f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f7
8b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c
:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0']
+groupUniqArray: ['58c:333c::58c:333c:0:0','f78b:333c::f78b:333c:0:0','f38b:333c::f38b:333c:0:0','18c:333c::18c:333c:0:0','e78b:333c::e78b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e08b:333c::e08b:333c:0:0','df8b:333c::df8b:333c:0:0','f48b:333c::f48b:333c:0:0','68c:333c::68c:333c:0:0','28c:333c::28c:333c:0:0','f08b:333c::f08b:333c:0:0','fa8b:333c::fa8b:333c:0:0','88c:333c::88c:333c:0:0','c8c:333c::c8c:333c:0:0','fe8b:333c::fe8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','e98b:333c::e98b:333c:0:0','ed8b:333c::ed8b:333c:0:0','b8c:333c::b8c:333c:0:0','f98b:333c::f98b:333c:0:0','fd8b:333c::fd8b:333c:0:0','de8b:333c::de8b:333c:0:0','f58b:333c::f58b:333c:0:0','78c:333c::78c:333c:0:0','38c:333c::38c:333c:0:0','f18b:333c::f18b:333c:0:0','e58b:333c::e58b:333c:0:0','e18b:333c::e18b:333c:0:0','e68b:333c::e68b:333c:0:0','e28b:333c::e28b:333c:0:0','48c:333c::48c:333c:0:0','dd8b:333c::dd8b:333c:0:0','f68b:333c::f68b:333c:0:0','f28b:333c::f28b:333c:0:0','8c:333c::8c:333c:0:0','a8c:333c::a8c:333c:0:0','f88b:333c::f88b:333c:0:0','fc8b:333c::fc8b:333c:0:0','e88b:333c::e88b:333c:0:0','ec8b:333c::ec8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','98c:333c::98c:333c:0:0','d8c:333c::d8c:333c:0:0','ff8b:333c::ff8b:333c:0:0']
+uniq: 49
+uniqExact: 49
+uniqCombined: 49
+uniqCombined64: 49
+uniqHLL12: 49
+uniqTheta: 49
+----- hash / IfState / ip4 -----
+Row 1:
+──────
+minIfState: 12535288824949910799
+maxIfState: 15790547582231549775
+first_valueIfState: 12535288824949910799
+last_valueIfState: 15790547582231549775
+topKIfState: 4906125994014190470
+groupArrayIfState: 9875990674330641453
+groupUniqArrayIfState: 15788623094139372318
+uniqIfState: 13857072740564896081
+uniqExactIfState: 2764760571052675772
+uniqCombinedIfState: 927481020821507998
+uniqCombined64IfState: 11588178464185397904
+uniqHLL12IfState: 592773541433144605
+uniqThetaIfState: 12573391720108828030
+----- hash / IfState / ip6 -----
+Row 1:
+──────
+minIfState: 9428555662807296659
+maxIfState: 18253481702148601156
+first_valueIfState: 9428555662807296659
+last_valueIfState: 18253481702148601156
+topKIfState: 4649040466816645853
+groupArrayIfState: 15334593495826890008
+groupUniqArrayIfState: 18179202420787216155
+uniqIfState: 1113473461736161202
+uniqExactIfState: 17291302630176581193
+uniqCombinedIfState: 7689865507370303115
+uniqCombined64IfState: 7689865507370303115
+uniqHLL12IfState: 12630756003012135681
+uniqThetaIfState: 11768246996604802350
+----- finalizeAggregation / IfState / ip4 -----
+Row 1:
+──────
+min: 59.154.201.255
+max: 59.154.202.47
+first_value: 59.154.201.255
+last_value: 59.154.202.47
+topK: 
['59.154.201.255','59.154.202.0','59.154.202.1','59.154.202.2','59.154.202.3','59.154.202.4','59.154.202.5','59.154.202.6','59.154.202.7','59.154.202.8'] +groupArray: ['59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.201.255','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.0','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.1','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.2','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.
154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.3','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.4','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.5','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.6','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.7','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.8','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.
154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.9','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.10','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.11','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.12','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.13','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.14','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59
.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.15','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.16','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.17','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.18','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.19','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.20','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.21','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.22','59.154.202.23','59.154.202.23','59.15
4.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.23','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.24','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.25','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.26','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.27','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.28','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.29','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.30','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.31','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.32','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.33','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.2
02.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.34','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.35','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.36','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.37','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.38','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.39','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.40','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.41','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.42','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.43','59.154.202.44','59.154.202.44','59.154.202.44','59.154.202.44','59.154.202.45','59.154.202.45','59.154.202.45','59.154.202.46','59.154.202.46','59.154.202.47']
+groupUniqArray: ['59.154.202.28','59.154.202.45','59.154.202.35','59.154.202.2','59.154.202.42','59.154.202.1','59.154.202.4','59.154.202.15','59.154.202.22','59.154.202.20','59.154.202.12','59.154.202.3','59.154.202.40','59.154.202.43','59.154.202.26','59.154.202.37','59.154.202.7','59.154.202.36','59.154.202.32','59.154.202.47','59.154.202.17','59.154.202.11','59.154.201.255','59.154.202.0','59.154.202.14','59.154.202.25','59.154.202.6','59.154.202.30','59.154.202.16','59.154.202.21','59.154.202.23','59.154.202.38','59.154.202.44','59.154.202.39','59.154.202.41','59.154.202.27','59.154.202.33','59.154.202.19','59.154.202.5','59.154.202.9','59.154.202.18','59.154.202.24','59.154.202.34','59.154.202.46','59.154.202.8','59.154.202.29','59.154.202.10','59.154.202.13','59.154.202.31']
+uniq: 49
+uniqExact: 49
+uniqCombined: 49
+uniqCombined64: 49
+uniqHLL12: 48
+uniqTheta: 49
+----- finalizeAggregation / IfState / ip6 -----
+Row 1:
+──────
+min: 8c:333c::8c:333c:0:0
+max: ff8b:333c::ff8b:333c:0:0
+first_value: 8c:333c::8c:333c:0:0
+last_value: ff8b:333c::ff8b:333c:0:0
+topK: ['dd8b:333c::dd8b:333c:0:0','de8b:333c::de8b:333c:0:0','df8b:333c::df8b:333c:0:0','e08b:333c::e08b:333c:0:0','e18b:333c::e18b:333c:0:0','e28b:333c::e28b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e58b:333c::e58b:333c:0:0','e68b:333c::e68b:333c:0:0']
+groupArray: 
['8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','8c:333c::8c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','18c:333c::18c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','28c:333c::28c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','38c:333c::38c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','48c:333c::48c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','58c:333c::58c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','68c:333c::68c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','78c:333c::78c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','88c:333c::88c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','98c:333c::98c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','a8c:333c::a8c:333c:0:0','b8c:333c::b8c:333c:0:0','b8c:333c::b8c:333c:0:0','b8c:333c::b8c:333c:0:0','c8c:333c::c8c:333c:0:0','c8c:333c::c8c:333c:0:0','d8c:333c::d8c:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::
dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','dd8b:333c::dd8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','de8b:333c::de8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','df8b:333c::df8b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e0
8b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e08b:333c::e08b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e18b:333c::e18b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e28b:333c::e28b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c
:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e48b:333c::e48b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e58b:333c::e58b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::
e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e68b:333c::e68b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e78b:333c::e78b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e88b:333c::e88b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','e98b:333c::e98b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea
8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ec8b:333c::ec8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c
:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ed8b:333c::ed8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f08b:333c::f08b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f18b:333c::f18b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::
f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f28b:333c::f28b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f38b:333c::f38b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f48b:333c::f48b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f58b:333c::f58b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f68b:333c::f68b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f7
8b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f78b:333c::f78b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f88b:333c::f88b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','f98b:333c::f98b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fa8b:333c::fa8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fc8b:333c::fc8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c:0:0','fd8b:333c::fd8b:333c
:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','fe8b:333c::fe8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0','ff8b:333c::ff8b:333c:0:0'] +groupUniqArray: ['58c:333c::58c:333c:0:0','f78b:333c::f78b:333c:0:0','f38b:333c::f38b:333c:0:0','18c:333c::18c:333c:0:0','e78b:333c::e78b:333c:0:0','e38b:333c::e38b:333c:0:0','e48b:333c::e48b:333c:0:0','e08b:333c::e08b:333c:0:0','df8b:333c::df8b:333c:0:0','f48b:333c::f48b:333c:0:0','68c:333c::68c:333c:0:0','28c:333c::28c:333c:0:0','f08b:333c::f08b:333c:0:0','fa8b:333c::fa8b:333c:0:0','88c:333c::88c:333c:0:0','c8c:333c::c8c:333c:0:0','fe8b:333c::fe8b:333c:0:0','ea8b:333c::ea8b:333c:0:0','ee8b:333c::ee8b:333c:0:0','e98b:333c::e98b:333c:0:0','ed8b:333c::ed8b:333c:0:0','b8c:333c::b8c:333c:0:0','f98b:333c::f98b:333c:0:0','fd8b:333c::fd8b:333c:0:0','de8b:333c::de8b:333c:0:0','f58b:333c::f58b:333c:0:0','78c:333c::78c:333c:0:0','38c:333c::38c:333c:0:0','f18b:333c::f18b:333c:0:0','e58b:333c::e58b:333c:0:0','e18b:333c::e18b:333c:0:0','e68b:333c::e68b:333c:0:0','e28b:333c::e28b:333c:0:0','48c:333c::48c:333c:0:0','dd8b:333c::dd8b:333c:0:0','f68b:333c::f68b:333c:0:0','f28b:333c::f28b:333c:0:0','8c:333c::8c:333c:0:0','a8c:333c::a8c:333c:0:0','f88b:333c::f88b:333c:0:0','fc8b:333c::fc8b:333c:0:0','e88b:333c::e88b:333c:0:0','ec8b:333c::ec8b:333c:0:0','eb8b:333c::eb8b:333c:0:0','ef8b:333c::ef8b:333c:0:0','fb8b:333c::fb8b:333c:0:0','98c:333c::98c:333c:0:0','d8c:333c::d8c:333c:0:0','ff8b:333c::ff8b:333c:0:0'] +uniq: 49 +uniqExact: 49 +uniqCombined: 49 +uniqCombined64: 49 +uniqHLL12: 49 +uniqTheta: 49 +----- Arg / hash / State / ip4 ----- +Row 1: +────── +argMinState: 13774589216353164344 +argMaxState: 1131101663917664667 +----- Arg / hash / State / ip6 ----- +Row 1: +────── +argMinState: 7320668278649617037 +argMaxState: 16598449636475438091 +----- Arg / finalizeAggregation / State / ip4 ----- +Row 1: +────── +argMinState: 59.154.201.255 +argMaxState: 59.154.202.47 +----- Arg / finalizeAggregation / State / ip6 ----- +Row 1: +────── +argMinState: 8c:333c::8c:333c:0:0 +argMaxState: ff8b:333c::ff8b:333c:0:0 +----- hash / State / ip4 ----- +Row 1: +────── +anyState: 12535288824949910799 +anyHeavyState: 9327034461443333306 +anyLastState: 12535288824949910799 +----- hash / State / ip6 ----- +Row 1: +────── +anyState: 1383994153676807399 +anyHeavyState: 15555709096566410627 +anyLastState: 1383994153676807399 +----- finalizeAggregation / State / ip4 ----- +Row 1: +────── +any: 59.154.201.255 +anyHeavy: 59.154.201.255 +anyLast: 59.154.201.255 +----- finalizeAggregation / State / ip6 ----- +Row 1: +────── +any: dd8b:333c::dd8b:333c:0:0 +anyHeavy: dd8b:333c::dd8b:333c:0:0 +anyLast: dd8b:333c::dd8b:333c:0:0 +----- Map/Map hash / State / ip4 ----- +Row 1: +────── +sumMapState: 9327034461443333306 +minMapState: 17403430892851901033 +maxMapState: 17403430892851901033 +----- Map/Map hash / State 
/ ip6 ----- +Row 1: +────── +sumMapState: 5204122423200337352 +minMapState: 14534921151579960284 +maxMapState: 14534921151579960284 +----- Map/Map finalizeAggregation / State / ip4 ----- +Row 1: +────── +sumMap: {'59.154.201.255':1} +minMap: {'59.154.201.255':1} +maxMap: {'59.154.201.255':1} +----- Map/Map finalizeAggregation / State / ip6 ----- +Row 1: +────── +sumMap: {'dd8b:333c::dd8b:333c:0:0':1} +minMap: {'dd8b:333c::dd8b:333c:0:0':1} +maxMap: {'dd8b:333c::dd8b:333c:0:0':1} +----- Map/Array hash / State / ip4 ----- +Row 1: +────── +sumMapState: 9327034461443333306 +minMapState: 9327034461443333306 +maxMapState: 9327034461443333306 +----- Map/Array hash / State / ip6 ----- +Row 1: +────── +sumMapState: 15555709096566410627 +minMapState: 15555709096566410627 +maxMapState: 15555709096566410627 +----- Map/Array finalizeAggregation / State / ip4 ----- +Row 1: +────── +sumMap: (['59.154.201.255'],[1]) +minMap: (['59.154.201.255'],[1]) +maxMap: (['59.154.201.255'],[1]) +----- Map/Array finalizeAggregation / State / ip6 ----- +Row 1: +────── +sumMap: (['dd8b:333c::dd8b:333c:0:0'],[1]) +minMap: (['dd8b:333c::dd8b:333c:0:0'],[1]) +maxMap: (['dd8b:333c::dd8b:333c:0:0'],[1]) diff --git a/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 new file mode 100644 index 00000000000..7d030d4be2d --- /dev/null +++ b/tests/queries/0_stateless/02751_ip_types_aggregate_functions_states.sql.j2 @@ -0,0 +1,170 @@ +-- Tags: no-parallel, no-fasttest + +{# this test checks backward compatibility of aggregate functions States against IPv4, IPv6 types #} + +{% set ip4_generator = "select num::UInt32::IPv4 ip from (select arrayJoin(range(999999999, number)) as num from numbers(999999999,50)) order by ip" %} +{% set ip6_generator = "SELECT toIPv6(IPv6NumToString(toFixedString(reinterpretAsFixedString(num)||reinterpretAsFixedString(num), 16))) AS ip FROM (select arrayJoin(range(1010011101, number)) as num from numbers(1010011101,50)) order by ip" %} +{% set ip_generators = {'ip4': ip4_generator, 'ip6': ip6_generator} %} + + +{% set agg_func_list = [ "min", "max", "first_value", "last_value", "topK", "groupArray", "groupUniqArray", "uniq", "uniqExact", "uniqCombined", "uniqCombined64", "uniqHLL12", "uniqTheta" ] %} + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- hash / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}State(ip) )) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- finalizeAggregation / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + finalizeAggregation( {{ func }}State(ip) ) AS {{ func }}{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- hash / IfState / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}IfState(ip, 1) )) AS {{ func }}IfState{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- finalizeAggregation / IfState / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + 
finalizeAggregation( {{ func }}IfState(ip, 1) ) AS {{ func }}{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% set agg_func_list = [ "argMin", "argMax" ] %} + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Arg / hash / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}State(ip, ip) )) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Arg / finalizeAggregation / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + finalizeAggregation( {{ func }}State(ip, ip) ) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + + + +{# let's test functions with not deterministic result against 1 row, to make it deterministic #} +{% set ip4_generator = "select number::UInt32::IPv4 ip from numbers(999999999,1) order by ip" %} +{% set ip6_generator = "SELECT toIPv6(IPv6NumToString(toFixedString(reinterpretAsFixedString(number)||reinterpretAsFixedString(number), 16))) AS ip FROM numbers(1010011101, 1) order by ip" %} + +{% set ip_generators = {'ip4': ip4_generator, 'ip6': ip6_generator} %} + +{% set agg_func_list = [ "any", "anyHeavy", "anyLast" ] %} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- hash / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}State(ip) )) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- finalizeAggregation / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + finalizeAggregation( {{ func }}State(ip) ) AS {{ func }}{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + + +{% set agg_func_list = [ "sumMap", "minMap", "maxMap" ] %} + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Map/Map hash / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}State(map(ip, 1::Int64)) )) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Map/Map finalizeAggregation / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + finalizeAggregation( {{ func }}State(map(ip, 1::Int64)) ) AS {{ func }}{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Map/Array hash / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + cityHash64(hex( {{ func }}State([ip], [1::Int64]) )) AS {{ func }}State{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} + + +{% for generator_name, ip_generator in ip_generators.items() %} + +select '----- Map/Array finalizeAggregation / State / {{ generator_name }} -----'; +select + {% for func in agg_func_list -%} + finalizeAggregation( {{ func 
}}State([ip], [1::Int64]) ) AS {{ func }}{{ "," if not loop.last }} + {% endfor -%} +from ( {{ ip_generator }} ) format Vertical; + +{% endfor -%} diff --git a/tests/queries/0_stateless/02783_date_predicate_optimizations.reference b/tests/queries/0_stateless/02783_date_predicate_optimizations.reference index 872a5dd1d7d..cd689b93034 100644 --- a/tests/queries/0_stateless/02783_date_predicate_optimizations.reference +++ b/tests/queries/0_stateless/02783_date_predicate_optimizations.reference @@ -1,54 +1,2 @@ 2021-12-31 23:00:00 0 2021-12-31 23:00:00 0 -Date -2 -3 -2 -4 -1 -3 -3 -2 -1 -4 -1 -4 -DateTime -2 -3 -2 -4 -1 -3 -3 -2 -1 -4 -1 -4 -Date32 -2 -3 -2 -4 -1 -3 -3 -2 -1 -4 -1 -4 -DateTime64 -2 -3 -2 -4 -1 -3 -3 -2 -1 -4 -1 -4 diff --git a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql index 0a2fa6cc93b..abb13f1005e 100644 --- a/tests/queries/0_stateless/02783_date_predicate_optimizations.sql +++ b/tests/queries/0_stateless/02783_date_predicate_optimizations.sql @@ -11,79 +11,3 @@ INSERT INTO source values ('2021-12-31 23:00:00', 0); SELECT * FROM source WHERE toYYYYMM(ts) = 202112; SELECT * FROM source WHERE toYear(ts) = 2021; - -DROP TABLE IF EXISTS source; -CREATE TABLE source -( - `dt` Date, - `ts` DateTime, - `dt_32` Date32, - `ts_64` DateTime64(3), - `n` Int32 -) -ENGINE = MergeTree -PARTITION BY toYYYYMM(ts) -ORDER BY tuple(); - -INSERT INTO source values ('2022-12-31', '2022-12-31 23:59:59', '2022-12-31', '2022-12-31 23:59:59.123', 0); -INSERT INTO source values ('2023-01-01', '2023-01-01 00:00:00', '2023-01-01', '2023-01-01 00:00:00.000', 1); -INSERT INTO source values ('2023-12-01', '2023-12-01 00:00:00', '2023-12-01', '2023-12-01 00:00:00.000', 2); -INSERT INTO source values ('2023-12-31', '2023-12-31 23:59:59', '2023-12-31', '2023-12-31 23:59:59.123', 3); -INSERT INTO source values ('2024-01-01', '2024-01-01 00:00:00', '2024-01-01', '2024-01-01 00:00:00.000', 4); - -SELECT 'Date'; -SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312; -SELECT count(*) FROM source WHERE toYear(dt) = 2023; -SELECT count(*) FROM source WHERE toYear(dt) <> 2023; -SELECT count(*) FROM source WHERE toYear(dt) < 2023; -SELECT count(*) FROM source WHERE toYear(dt) <= 2023; -SELECT count(*) FROM source WHERE toYear(dt) > 2023; -SELECT count(*) FROM source WHERE toYear(dt) >= 2023; - -SELECT 'DateTime'; -SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312; -SELECT count(*) FROM source WHERE toYear(ts) = 2023; -SELECT count(*) FROM source WHERE toYear(ts) <> 2023; -SELECT count(*) FROM source WHERE toYear(ts) < 2023; -SELECT count(*) FROM source WHERE toYear(ts) <= 2023; -SELECT count(*) FROM source WHERE toYear(ts) > 2023; -SELECT count(*) FROM source WHERE toYear(ts) >= 2023; - -SELECT 'Date32'; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312; -SELECT count(*) FROM 
source WHERE toYYYYMM(dt_32) < 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312; -SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312; -SELECT count(*) FROM source WHERE toYear(dt_32) = 2023; -SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023; -SELECT count(*) FROM source WHERE toYear(dt_32) < 2023; -SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023; -SELECT count(*) FROM source WHERE toYear(dt_32) > 2023; -SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023; - -SELECT 'DateTime64'; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312; -SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312; -SELECT count(*) FROM source WHERE toYear(ts_64) = 2023; -SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023; -SELECT count(*) FROM source WHERE toYear(ts_64) < 2023; -SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023; -SELECT count(*) FROM source WHERE toYear(ts_64) > 2023; -SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023; -DROP TABLE source; diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference deleted file mode 100644 index 9235e7e106a..00000000000 --- a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.reference +++ /dev/null @@ -1,87 +0,0 @@ -SELECT value1 -FROM date_t -WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 < \'1993-01-01\') OR (date1 >= \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 >= \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 < \'1994-01-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 >= \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1998-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) OR ((date1 >= \'1994-01-01\') AND (date1 < \'1995-01-01\'))) AND ((id >= 1) AND (id <= 3)) -SELECT - value1, - toYear(date1) AS year1 -FROM date_t -WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 < \'1993-01-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -PREWHERE (date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\') -WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((id >= 1) AND (id <= 3)) AND ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) -SELECT value1 -FROM date_t -WHERE (toYYYYMM(date1) = 199300) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (toYYYYMM(date1) = 199313) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 >= \'1992-03-01\') AND (date1 < \'1992-04-01\')) AND ((id >= 
1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 < \'1992-03-01\') OR (date1 >= \'1992-04-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 < \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 >= \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 < \'1992-04-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE (date1 >= \'1992-03-01\') AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date_t -WHERE ((date1 >= \'1992-03-01\') OR ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\'))) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM datetime_t -WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM datetime_t -WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date32_t -WHERE ((date1 >= \'1993-01-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM date32_t -WHERE ((date1 >= \'1993-12-01\') AND (date1 < \'1994-01-01\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM datetime64_t -WHERE ((date1 >= \'1993-01-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) -SELECT value1 -FROM datetime64_t -WHERE ((date1 >= \'1993-12-01 00:00:00\') AND (date1 < \'1994-01-01 00:00:00\')) AND ((id >= 1) AND (id <= 3)) diff --git a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql b/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql deleted file mode 100644 index 266be59b0a3..00000000000 --- a/tests/queries/0_stateless/02785_date_predicate_optimizations_ast_rewrite.sql +++ /dev/null @@ -1,47 +0,0 @@ -DROP TABLE IF EXISTS date_t; -CREATE TABLE date_t (id UInt32, value1 String, date1 Date) ENGINE ReplacingMergeTree() ORDER BY id; - -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT 
value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3; -DROP TABLE date_t; - -DROP TABLE IF EXISTS datetime_t; -CREATE TABLE datetime_t (id UInt32, value1 String, date1 Datetime) ENGINE ReplacingMergeTree() ORDER BY id; - -EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -DROP TABLE datetime_t; - -DROP TABLE IF EXISTS date32_t; -CREATE TABLE date32_t (id UInt32, value1 String, date1 Date32) ENGINE ReplacingMergeTree() ORDER BY id; - -EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -DROP TABLE date32_t; - -DROP TABLE IF EXISTS datetime64_t; -CREATE TABLE datetime64_t (id UInt32, value1 String, date1 Datetime64) ENGINE ReplacingMergeTree() ORDER BY id; - -EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; -EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; -DROP TABLE datetime64_t; diff --git a/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.reference b/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.reference new file mode 100644 index 00000000000..e6ce356cd45 --- /dev/null +++ b/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.reference @@ -0,0 +1 @@ +v1 v2 diff --git a/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.sql b/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.sql new file mode 100644 index 00000000000..fed030f170d --- /dev/null +++ b/tests/queries/0_stateless/02789_set_index_nullable_condition_bug.sql @@ -0,0 +1,15 @@ +drop table if exists test_table; +CREATE TABLE test_table +( + col1 String, + col2 String, + INDEX test_table_col2_idx col2 TYPE set(0) GRANULARITY 1 +) ENGINE = MergeTree() + ORDER BY col1 +AS SELECT 'v1', 'v2'; + +SELECT * FROM test_table +WHERE 1 == 1 AND col1 == col1 OR + 0 AND col2 == NULL; + +drop table if exists test_table; diff --git a/tests/queries/0_stateless/02797_transform_narrow_types.reference b/tests/queries/0_stateless/02797_transform_narrow_types.reference new file mode 100644 index 00000000000..5a2bd60ca83 --- /dev/null +++ b/tests/queries/0_stateless/02797_transform_narrow_types.reference @@ -0,0 +1,6 @@ +f +s +11 +11 +33 +3 diff --git a/tests/queries/0_stateless/02797_transform_narrow_types.sql b/tests/queries/0_stateless/02797_transform_narrow_types.sql new file mode 100644 index 00000000000..56a02fb8af7 --- /dev/null +++ b/tests/queries/0_stateless/02797_transform_narrow_types.sql @@ -0,0 +1,6 @@ +SELECT transform(-1, [-1, 2], ['f', 's'], 'g'); +SELECT transform(2, [-1, 2], ['f', 's'], 'g'); +SELECT transform(-1, [-1, 2], [11, 22], 33); +SELECT transform(-1, [-1, 2], [11, 22]); +SELECT transform(3, [-1, 2], [11, 22], 33); +SELECT transform(3, [-1, 2], 
[11, 22]); diff --git a/tests/queries/0_stateless/02798_generic_transform.reference b/tests/queries/0_stateless/02798_generic_transform.reference new file mode 100644 index 00000000000..2140c3218b3 --- /dev/null +++ b/tests/queries/0_stateless/02798_generic_transform.reference @@ -0,0 +1,38 @@ +def +def +def +hello +def +world +def +abc! +def +def + + + +hello + +world + +abc + + + + + +hello + +world + +abc + + +123 +2023-03-03 00:00:00.000 +2023-02-02 00:00:00.000 +2023-01-01 00:00:00.000 +1 1 +42 42 +42 +42 diff --git a/tests/queries/0_stateless/02798_generic_transform.sql b/tests/queries/0_stateless/02798_generic_transform.sql new file mode 100644 index 00000000000..6317d83fb29 --- /dev/null +++ b/tests/queries/0_stateless/02798_generic_transform.sql @@ -0,0 +1,12 @@ +SELECT transform((number, toString(number)), [(3, '3'), (5, '5'), (7, '7')], ['hello', 'world', 'abc!'], 'def') FROM system.numbers LIMIT 10; +SELECT transform(toNullable(toInt256(number)), [3, 5, 7], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; +SELECT transform(toUInt256(number), [3, 5, 7], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; + +select case 1::Nullable(Int32) when 1 then 123 else 0 end; + +SELECT transform(arrayJoin(['c', 'b', 'a']), ['a', 'b'], [toDateTime64('2023-01-01', 3), toDateTime64('2023-02-02', 3)], toDateTime64('2023-03-03', 3)); + +SELECT transform(1, [1], [toDecimal32(1, 2)]), toDecimal32(1, 2); +select transform(1, [1], [toDecimal32(42, 2)]), toDecimal32(42, 2); +SELECT transform(1, [1], [toDecimal32(42, 2)], 0); +SELECT transform(1, [1], [toDecimal32(42, 2)], toDecimal32(0, 2)); diff --git a/tests/queries/0_stateless/02799_transform_empty_arrays.reference b/tests/queries/0_stateless/02799_transform_empty_arrays.reference new file mode 100644 index 00000000000..fd44083947e --- /dev/null +++ b/tests/queries/0_stateless/02799_transform_empty_arrays.reference @@ -0,0 +1,30 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +Hello +Hello +Hello +Hello +Hello +Hello +Hello +Hello +Hello +Hello +Hello 0 +Hello 1 +Hello 2 +Hello 3 +Hello 4 +Hello 5 +Hello 6 +Hello 7 +Hello 8 +Hello 9 diff --git a/tests/queries/0_stateless/02799_transform_empty_arrays.sql b/tests/queries/0_stateless/02799_transform_empty_arrays.sql new file mode 100644 index 00000000000..84e3e9d2930 --- /dev/null +++ b/tests/queries/0_stateless/02799_transform_empty_arrays.sql @@ -0,0 +1,3 @@ +SELECT transform(number, [], [1]) FROM numbers(10); +SELECT transform(number, [], [], 'Hello') FROM numbers(10); +SELECT transform(number, [], [], 'Hello ' || number::String) FROM numbers(10); diff --git a/tests/queries/0_stateless/02800_transform_alter.reference b/tests/queries/0_stateless/02800_transform_alter.reference new file mode 100644 index 00000000000..cae8aeba20f --- /dev/null +++ b/tests/queries/0_stateless/02800_transform_alter.reference @@ -0,0 +1,6 @@ +1 a1 a1 +2 a2 b2 +3 a3 b3 +1 a1 +2 b2 +3 b3 diff --git a/tests/queries/0_stateless/02800_transform_alter.sql b/tests/queries/0_stateless/02800_transform_alter.sql new file mode 100644 index 00000000000..7458f51c5a4 --- /dev/null +++ b/tests/queries/0_stateless/02800_transform_alter.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS test_xy; +DROP TABLE IF EXISTS updates; + +CREATE TABLE test_xy +( + `x` Int32, + `y` String +) +ENGINE = MergeTree +ORDER BY x; + +CREATE TABLE updates +( + `x` Int32, + `y` String +) +ENGINE = MergeTree +ORDER BY x; + +INSERT INTO test_xy(x, y) VALUES (1, 'a1'), (2, 'a2'), (3, 'a3'); +INSERT INTO updates(x, y) VALUES (2, 'b2'), (3, 'b3'); + +SELECT x, 
y, + transform(x, + (select groupArray(x) from (select x, y from updates order by x) t1), + (select groupArray(y) from (select x, y from updates order by x) t2), + y) +FROM test_xy +WHERE 1 ORDER BY x, y; + +SET mutations_sync = 1; +ALTER table test_xy + UPDATE + y = transform(x, + (select groupArray(x) from (select x, y from updates order by x) t1), + (select groupArray(y) from (select x, y from updates order by x) t2), + y) + WHERE 1; + +SELECT * FROM test_xy ORDER BY x, y; + +DROP TABLE test_xy; +DROP TABLE updates; diff --git a/tests/queries/0_stateless/02801_transform_nullable.reference b/tests/queries/0_stateless/02801_transform_nullable.reference new file mode 100644 index 00000000000..48d529e7d90 --- /dev/null +++ b/tests/queries/0_stateless/02801_transform_nullable.reference @@ -0,0 +1,75 @@ +1970-01-01 00:00:01.000 +1970-01-01 00:00:02.000 +1970-01-01 00:00:00.000 +\N +\N +\N +hello +\N +world +\N +abc +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +\N +-1 +-1 +-1 +111 +-1 +222 +-1 +\N +-1 +-1 +-1.1 +-1.1 +-1.1 +\N +-1.1 +222 +-1.1 +333 +-1.1 +-1.1 +1 +1 +1 +\N +1 +\N +1 +\N +1 +1 +Остальные +\N +- +xyz +Hello +- +xyz +World +- +xyz +- +- diff --git a/tests/queries/0_stateless/02801_transform_nullable.sql b/tests/queries/0_stateless/02801_transform_nullable.sql new file mode 100644 index 00000000000..95f3c16600a --- /dev/null +++ b/tests/queries/0_stateless/02801_transform_nullable.sql @@ -0,0 +1,14 @@ +select transform('a', ['a', 'b'], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); +select transform(2, [1, 2], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); +select transform(null, [1, 2], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); + +SELECT transform(number, [3, 5, 7], ['hello', 'world', 'abc'], null) FROM system.numbers LIMIT 10; +SELECT transform(null, ['3', '5', '7'], ['hello', 'world', 'abc'], null) FROM system.numbers LIMIT 10; +SELECT transform(null, [null, null, null], [null, null, null], null) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, null], -1) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [null, 222, 333], materialize(-1.1)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [null, null, null], materialize(1)) FROM system.numbers LIMIT 10; +SELECT transform(1, [2, 3], ['Meta.ua', null], materialize('Остальные')) AS title; +SELECT transform(2, [2, 3], [null, 'Google'], materialize('Остальные')) AS title; + +SELECT transform(number % 3 = 1 ? 
NULL : number, [2, 5, NULL], ['Hello', 'World', 'xyz'], '-') FROM numbers(10); diff --git a/tests/queries/0_stateless/02807_lower_utf8_msan.reference b/tests/queries/0_stateless/02807_lower_utf8_msan.reference new file mode 100644 index 00000000000..7d7df6af777 --- /dev/null +++ b/tests/queries/0_stateless/02807_lower_utf8_msan.reference @@ -0,0 +1,4 @@ +©-------------------- +©-------------------------------------- +AAAAАБВГAAAAAAAAAAAAАБВГAAAAAAAA +AAAAАБВГAAAAAAAAAAAAАБВГAAAAAAAA diff --git a/tests/queries/0_stateless/02807_lower_utf8_msan.sql b/tests/queries/0_stateless/02807_lower_utf8_msan.sql new file mode 100644 index 00000000000..e9eb18bf615 --- /dev/null +++ b/tests/queries/0_stateless/02807_lower_utf8_msan.sql @@ -0,0 +1,2 @@ +SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1; +SELECT upperUTF8(materialize('aaaaАБВГaaaaaaaaaaaaАБВГAAAAaaAA')) FROM numbers(2); diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index bf988295644..526e674a154 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -469,6 +469,7 @@ MSan MVCC MacBook MacOS +MapState MarkCacheBytes MarkCacheFiles MarksLoaderThreads @@ -2169,6 +2170,7 @@ snowflakeToDateTime socketcache soundex sparkbar +sparkBar sparsehash speedscope splitByChar diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index dd21cc7e953..307ed97068f 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -5,6 +5,7 @@ v23.4.4.16-stable 2023-06-17 v23.4.3.48-stable 2023-06-12 v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 +v23.3.6.7-lts 2023-06-28 v23.3.5.9-lts 2023-06-22 v23.3.4.17-lts 2023-06-17 v23.3.3.52-lts 2023-06-12
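
For anyone replaying the new 02751 test by hand, here is a minimal, non-templated sketch of what one generated query pair expands to; it is illustrative only and not part of the patch. It serializes an aggregate state built over IPv4 values, fingerprints the serialized bytes with cityHash64(hex(...)), and round-trips the same state through finalizeAggregation, so a change to the State wire format for IP types shows up as a changed hash even when the finalized value stays stable:

-- One aggregate function (uniqExact) instead of the full template list;
-- the generator subquery is copied from ip4_generator in the .sql.j2 above.
SELECT
    cityHash64(hex(uniqExactState(ip))) AS uniqExactState,
    finalizeAggregation(uniqExactState(ip)) AS uniqExact
FROM
(
    SELECT num::UInt32::IPv4 AS ip
    FROM (SELECT arrayJoin(range(999999999, number)) AS num FROM numbers(999999999, 50))
    ORDER BY ip
)
FORMAT Vertical;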
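The ip6 generator deserves a note, since it builds IPv6 values with no address literals in the data path: it concatenates two copies of the number's raw bytes into a FixedString(16) and renders that as an address. A sketch of the single-row variant, assuming a little-endian build, which is what the expected values imply:

SELECT toIPv6(IPv6NumToString(toFixedString(
    reinterpretAsFixedString(number) || reinterpretAsFixedString(number), 16))) AS ip
FROM numbers(1010011101, 1);
-- 1010011101 is 0x3c338bdd; its little-endian bytes, doubled, render as
-- 'dd8b:333c::dd8b:333c:0:0', the value seen in the any/anyLast rows above.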
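The Map sections exercise two call shapes for the same combinators. A standalone sketch of both, reusing the single-row ip4 generator so the results line up with the reference output above (again illustrative, not part of the patch):

-- Map/Map form: the argument is a Map(IPv4, Int64); finalizes to {'59.154.201.255':1}.
SELECT finalizeAggregation(sumMapState(map(ip, 1::Int64))) AS sumMap
FROM (SELECT number::UInt32::IPv4 AS ip FROM numbers(999999999, 1));

-- Map/Array form: parallel key and value arrays; finalizes to (['59.154.201.255'],[1]).
SELECT finalizeAggregation(sumMapState([ip], [1::Int64])) AS sumMap
FROM (SELECT number::UInt32::IPv4 AS ip FROM numbers(999999999, 1));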
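The transform tests pin down the fall-through rules, which are easy to get wrong: with a fourth argument the default is returned on a miss, and without one the input value itself passes through unchanged. The opening lines of 02797_transform_narrow_types.sql show all three cases directly:

SELECT transform(2, [-1, 2], ['f', 's'], 'g');  -- match on 2, returns 's'
SELECT transform(3, [-1, 2], [11, 22], 33);     -- miss, returns the default 33
SELECT transform(3, [-1, 2], [11, 22]);         -- miss, no default, returns 3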
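02800_transform_alter is worth reading as a usage pattern and not only as a regression test: it applies a keyed update by folding a mapping table into two parallel arrays and letting unmatched rows keep their current value. The mutation from the test, lightly commented:

SET mutations_sync = 1;
ALTER TABLE test_xy
    UPDATE y = transform(x,
        -- keys and values come from two subqueries with the same ORDER BY,
        -- which keeps the two arrays aligned position by position
        (SELECT groupArray(x) FROM (SELECT x, y FROM updates ORDER BY x) t1),
        (SELECT groupArray(y) FROM (SELECT x, y FROM updates ORDER BY x) t2),
        y)  -- fall back to the existing value when x has no match in updates
    WHERE 1;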