Merge branch 'master' into vdimir/debug_02956_rocksdb_bulk_sink

vdimir 2024-07-11 11:04:46 +02:00 committed by GitHub
commit ada95a6eaa
GPG Key ID: B5690EEEBB952194
411 changed files with 8258 additions and 4039 deletions


@ -172,7 +172,7 @@ jobs:
################################# Stage Final #################################
#
FinishCheck:
if: ${{ !cancelled() }}
if: ${{ !failure() }}
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
runs-on: [self-hosted, style-checker-aarch64]
steps:


@ -1,5 +1,6 @@
#pragma once
#include <cstdlib>
#include <memory>
#include <string>


@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
using type = std::make_unsigned_t<T>;
};
template <> struct make_unsigned<Int8> { using type = UInt8; };
template <> struct make_unsigned<UInt8> { using type = UInt8; };
template <> struct make_unsigned<Int16> { using type = UInt16; };
template <> struct make_unsigned<UInt16> { using type = UInt16; };
template <> struct make_unsigned<Int32> { using type = UInt32; };
template <> struct make_unsigned<UInt32> { using type = UInt32; };
template <> struct make_unsigned<Int64> { using type = UInt64; };
template <> struct make_unsigned<UInt64> { using type = UInt64; };
template <> struct make_unsigned<Int128> { using type = UInt128; };
template <> struct make_unsigned<UInt128> { using type = UInt128; };
template <> struct make_unsigned<Int256> { using type = UInt256; };
@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
using type = std::make_signed_t<T>;
};
template <> struct make_signed<Int8> { using type = Int8; };
template <> struct make_signed<UInt8> { using type = Int8; };
template <> struct make_signed<Int16> { using type = Int16; };
template <> struct make_signed<UInt16> { using type = Int16; };
template <> struct make_signed<Int32> { using type = Int32; };
template <> struct make_signed<UInt32> { using type = Int32; };
template <> struct make_signed<Int64> { using type = Int64; };
template <> struct make_signed<UInt64> { using type = Int64; };
template <> struct make_signed<Int128> { using type = Int128; };
template <> struct make_signed<UInt128> { using type = Int128; };
template <> struct make_signed<Int256> { using type = Int256; };
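These specializations exist because ClickHouse's integer aliases (including the extended `Int128`/`UInt256` family) are not all covered by `std::make_unsigned_t`/`std::make_signed_t`, so the mapping is spelled out explicitly. A minimal sketch of what the traits guarantee, assuming the typedefs from `base/types.h` are in scope:

```cpp
#include <type_traits>

// Illustrative checks only; they assume the make_unsigned/make_signed
// traits above and ClickHouse's integer typedefs (Int8, UInt128, ...).
static_assert(std::is_same_v<typename make_unsigned<Int8>::type, UInt8>);
static_assert(std::is_same_v<typename make_unsigned<Int128>::type, UInt128>);
static_assert(std::is_same_v<typename make_signed<UInt64>::type, Int64>);
static_assert(std::is_same_v<typename make_signed<UInt128>::type, Int128>);
```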


@ -0,0 +1,9 @@
#pragma once
#include <memory>
template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
return ptr.use_count() == 1;
}
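`std::shared_ptr::unique()` was deprecated in C++17 and removed in C++20, which is the kind of call a helper like this replaces. Note that `use_count() == 1` is only a reliable uniqueness check when no other thread can acquire or release ownership concurrently. A small usage sketch:

```cpp
#include <cassert>
#include <memory>

// Assumes the isSharedPtrUnique helper above is in scope.
int main()
{
    auto p = std::make_shared<int>(42);
    assert(isSharedPtrUnique(p));   // single owner

    auto q = p;                     // a second owner appears
    assert(!isSharedPtrUnique(p));
}
```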


@ -232,7 +232,7 @@ void Foundation_API format(
const Any & value10);
void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
/// Supports a variable number of arguments and is used by
/// all other variants of format().
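Later in this diff the vector-based overload is renamed to `formatVector` and the internal forwarding calls are updated to match. A minimal usage sketch of the two entry points, assuming this fork's `Any`-based declarations above (the format string and values are illustrative):

```cpp
#include <string>
#include <vector>
#include "Poco/Any.h"
#include "Poco/Format.h"

int main()
{
    std::string result;

    // Fixed-arity convenience overload: each argument is wrapped in Poco::Any.
    Poco::format(result, "%s is %d years old", std::string("Alice"), 30);

    // Vector form: useful when the argument list is built at runtime.
    std::vector<Poco::Any> args;
    args.push_back(std::string("Bob"));
    args.push_back(25);
    Poco::formatVector(result, "%s is %d years old", args);
}
```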


@ -21,6 +21,8 @@
#include "Poco/AtomicCounter.h"
#include "Poco/Foundation.h"
#include <atomic>
namespace Poco
{


@ -51,8 +51,8 @@ namespace
}
if (width != 0) str.width(width);
}
void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
if (itFmt != endFmt && *itFmt == '.')
@ -67,7 +67,7 @@ namespace
if (prec >= 0) str.precision(prec);
}
}
char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
char mod = 0;
@ -77,13 +77,13 @@ namespace
{
case 'l':
case 'h':
case 'L':
case 'L':
case '?': mod = *itFmt++; break;
}
}
return mod;
}
std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
{
int index = 0;
@ -110,8 +110,8 @@ namespace
case 'f': str << std::fixed; break;
}
}
void writeAnyInt(std::ostream& str, const Any& any)
{
if (any.type() == typeid(char))
@ -201,7 +201,7 @@ namespace
str << RefAnyCast<std::string>(*itVal++);
break;
case 'z':
str << AnyCast<std::size_t>(*itVal++);
str << AnyCast<std::size_t>(*itVal++);
break;
case 'I':
case 'D':
@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
{
std::vector<Any> args;
args.push_back(value);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
std::vector<Any> args;
args.push_back(value1);
args.push_back(value2);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value1);
args.push_back(value2);
args.push_back(value3);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value2);
args.push_back(value3);
args.push_back(value4);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value3);
args.push_back(value4);
args.push_back(value5);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value4);
args.push_back(value5);
args.push_back(value6);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value5);
args.push_back(value6);
args.push_back(value7);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value6);
args.push_back(value7);
args.push_back(value8);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value7);
args.push_back(value8);
args.push_back(value9);
format(result, fmt, args);
formatVector(result, fmt, args);
}
@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
args.push_back(value8);
args.push_back(value9);
args.push_back(value10);
format(result, fmt, args);
formatVector(result, fmt, args);
}
void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
{
std::string::const_iterator itFmt = fmt.begin();
std::string::const_iterator endFmt = fmt.end();
std::vector<Any>::const_iterator itVal = values.begin();
std::vector<Any>::const_iterator endVal = values.end();
std::vector<Any>::const_iterator endVal = values.end();
while (itFmt != endFmt)
{
switch (*itFmt)


@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const
for (int i = 0; i < 12; ++i)
{
s += format(fmt, (unsigned int) _id[i]);
s += Poco::format(fmt, (unsigned int) _id[i]);
}
return s;
}


@ -43,9 +43,9 @@ namespace Poco {
namespace MongoDB {
static const std::string keyCursor {"cursor"};
static const std::string keyFirstBatch {"firstBatch"};
static const std::string keyNextBatch {"nextBatch"};
[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};
static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);
@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
connection.readResponse(_response);
}
else
#endif
#endif
{
_response.clear();
_query.setCursor(_cursorID, _batchSize);


@ -17,9 +17,9 @@
#include "Poco/NumberFormatter.h"
#include "Poco/NumberParser.h"
#include "Poco/String.h"
#include <charconv>
#include <format>
using Poco::NumberFormatter;
using Poco::NumberParser;
using Poco::icompare;
@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
erase(CONTENT_LENGTH);
}
std::streamsize HTTPMessage::getContentLength() const
{
const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
erase(CONTENT_LENGTH);
}
Poco::Int64 HTTPMessage::getContentLength64() const
{
const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);
@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
}
bool HTTPMessage::getChunkedTransferEncoding() const
{
return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
}
void HTTPMessage::setContentType(const std::string& mediaType)
{
if (mediaType.empty())
@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
setContentType(mediaType.toString());
}
const std::string& HTTPMessage::getContentType() const
{
return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);

contrib/grpc (vendored submodule)

@ -1 +1 @@
Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
Subproject commit f5b7fdc2dff09ada06dbf6c75df298fb40f898df


@ -284,6 +284,11 @@ function run_tests
NPROC=1
fi
export CLICKHOUSE_CONFIG_DIR=$FASTTEST_DATA
export CLICKHOUSE_CONFIG="$FASTTEST_DATA/config.xml"
export CLICKHOUSE_USER_FILES="$FASTTEST_DATA/user_files"
export CLICKHOUSE_SCHEMA_FILES="$FASTTEST_DATA/format_schemas"
local test_opts=(
--hung-check
--fast-tests-only


@ -9,6 +9,15 @@ set -e -x -a
MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
fi
# Choose random timezone for this test run.
#
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
@ -89,10 +98,57 @@ if [ "$NUM_TRIES" -gt "1" ]; then
mkdir -p /var/run/clickhouse-server
fi
# Run a CH instance to execute sequential tests on it in parallel with all other tests.
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/
sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*
function replace(){
sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
}
replace "s|<port>9000</port>|<port>19000</port>|g"
replace "s|<port>9440</port>|<port>19440</port>|g"
replace "s|<port>9988</port>|<port>19988</port>|g"
replace "s|<port>9234</port>|<port>19234</port>|g"
replace "s|<port>9181</port>|<port>19181</port>|g"
replace "s|<https_port>8443</https_port>|<https_port>18443</https_port>|g"
replace "s|<tcp_port>9000</tcp_port>|<tcp_port>19000</tcp_port>|g"
replace "s|<tcp_port>9181</tcp_port>|<tcp_port>19181</tcp_port>|g"
replace "s|<tcp_port_secure>9440</tcp_port_secure>|<tcp_port_secure>19440</tcp_port_secure>|g"
replace "s|<tcp_with_proxy_port>9010</tcp_with_proxy_port>|<tcp_with_proxy_port>19010</tcp_with_proxy_port>|g"
replace "s|<mysql_port>9004</mysql_port>|<mysql_port>19004</mysql_port>|g"
replace "s|<postgresql_port>9005</postgresql_port>|<postgresql_port>19005</postgresql_port>|g"
replace "s|<interserver_http_port>9009</interserver_http_port>|<interserver_http_port>19009</interserver_http_port>|g"
replace "s|8123|18123|g"
replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
# distributed cache
replace "s|<tcp_port>10001</tcp_port>|<tcp_port>10003</tcp_port>|g"
replace "s|<tcp_port>10002</tcp_port>|<tcp_port>10004</tcp_port>|g"
sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
--pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
-- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
--logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
--prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
--mysql_port 19004 --postgresql_port 19005
for _ in {1..100}
do
clickhouse-client --port 19000 --query "SELECT 1" && break
sleep 1
done
fi
# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
@ -129,7 +185,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
| sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
> /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
@ -209,15 +265,15 @@ function run_tests()
ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--shared-catalog')
fi
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--replicated-database')
# Too many tests fail for DatabaseReplicated in parallel.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('2')
ADDITIONAL_OPTIONS+=('3')
elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
# Coverage on a per-test basis could only be collected sequentially.
# Do not set the --jobs parameter.
@ -225,7 +281,11 @@ function run_tests()
else
# All other configurations are OK.
ADDITIONAL_OPTIONS+=('--jobs')
ADDITIONAL_OPTIONS+=('8')
ADDITIONAL_OPTIONS+=('5')
fi
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
fi
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@ -249,7 +309,7 @@ function run_tests()
try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"
set +e
timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
--no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
@ -260,7 +320,7 @@ export -f run_tests
# This should be enough to setup job and collect artifacts
TIMEOUT=$((MAX_RUN_TIME - 300))
TIMEOUT=$((MAX_RUN_TIME - 600))
if [ "$NUM_TRIES" -gt "1" ]; then
# We don't run tests with Ordinary database in PRs, only in master.
# So run new/changed tests with Ordinary at least once in flaky check.
@ -289,7 +349,7 @@ do
err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
@ -298,7 +358,7 @@ do
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
echo "$err"
[[ "0" != "${#err}" ]] && failed_to_save_logs=1
@ -309,12 +369,17 @@ done
# Why do we read data with clickhouse-local?
# Because it's the simplest way to read it when server has crashed.
sudo clickhouse stop ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
fi
@ -322,6 +387,12 @@ rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server3/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
fi
data_path_config="--path=/var/lib/clickhouse/"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
# We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
@ -341,12 +412,17 @@ if [ $failed_to_save_logs -ne 0 ]; then
for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
fi
done
@ -382,7 +458,14 @@ rm -rf /var/lib/clickhouse/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
rm -rf /var/lib/clickhouse3/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
@ -393,7 +476,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
fi
if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:


@ -75,7 +75,7 @@ SETTINGS
Possible values:
- unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper.
- ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after an unsuccessful loading attempt, are stored in ZooKeeper.
- ordered — With ordered mode, the files are processed in lexicographic order. This means that if a file named 'BBB' was processed at some point and later a file named 'AA' is added to the bucket, it will be ignored. Only the max name (in the lexicographic sense) of the successfully consumed file, and the names of files that will be retried after an unsuccessful loading attempt, are stored in ZooKeeper.
Default value: `ordered` in versions before 24.6. Starting with 24.6 there is no default value; the setting must be specified manually. For tables created on earlier versions the default value remains `ordered` for compatibility.


@ -1535,6 +1535,10 @@ the columns from input data will be mapped to the columns from the table by thei
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
If the setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
the types in the header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes output format.
If the setting [input_format_binary_decode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_decode_types_in_binary_format) is set to 1,
the types in the header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes input format.
:::
## RowBinaryWithDefaults {#rowbinarywithdefaults}


@ -2,15 +2,11 @@
slug: /en/operations/opentelemetry
sidebar_position: 62
sidebar_label: Tracing ClickHouse with OpenTelemetry
title: "[experimental] Tracing ClickHouse with OpenTelemetry"
title: "Tracing ClickHouse with OpenTelemetry"
---
[OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from a distributed application. ClickHouse has some support for OpenTelemetry.
:::note
This is an experimental feature that will change in backwards-incompatible ways in future releases.
:::
## Supplying Trace Context to ClickHouse
ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.


@ -1951,6 +1951,18 @@ The maximum allowed size for String in RowBinary format. It prevents allocating
Default value: `1GiB`.
### output_format_binary_encode_types_in_binary_format {#output_format_binary_encode_types_in_binary_format}
Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes output format.
Disabled by default.
### input_format_binary_decode_types_in_binary_format {#input_format_binary_decode_types_in_binary_format}
Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes input format.
Disabled by default.
## Native format settings {#native-format-settings}
### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}
@ -1958,3 +1970,15 @@ Default value: `1GiB`.
Allow types conversion in Native input format between columns from input data and requested columns.
Enabled by default.
### output_format_native_encode_types_in_binary_format {#output_format_native_encode_types_in_binary_format}
Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native output format.
Disabled by default.
### input_format_native_decode_types_in_binary_format {#input_format_native_decode_types_in_binary_format}
Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native input format.
Disabled by default.


@ -1358,12 +1358,25 @@ Connection pool size for PostgreSQL table engine and database engine.
Default value: 16
## postgresql_connection_attempt_timeout {#postgresql-connection-attempt-timeout}
Connection timeout in seconds for a single attempt to connect to the PostgreSQL endpoint.
The value is passed as a `connect_timeout` parameter of the connection URL.
Default value: `2`.
## postgresql_connection_pool_wait_timeout {#postgresql-connection-pool-wait-timeout}
Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.
Default value: 5000
## postgresql_connection_pool_retries {#postgresql-connection-pool-retries}
The maximum number of retries to establish a connection with the PostgreSQL endpoint.
Default value: `2`.
## postgresql_connection_pool_auto_close_connection {#postgresql-connection-pool-auto-close-connection}
Close connection before returning connection to the pool.


@ -16,7 +16,7 @@ singleValueOrNull(x)
**Parameters**
- `x` — Column of any [data type](../../data-types/index.md).
- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md), and [Tuple](../../data-types/tuple.md), which cannot be of type [Nullable](../../data-types/nullable.md)).
**Returned values**


@ -0,0 +1,115 @@
---
slug: /en/sql-reference/data-types/data-types-binary-encoding
sidebar_position: 56
sidebar_label: Data types binary encoding specification.
---
# Data types binary encoding specification
This specification describes the binary format that can be used for binary encoding and decoding of ClickHouse data types. This format is used in `Dynamic` column [binary serialization](dynamic.md#binary-output-format) and can be used in input/output formats [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) and [Native](../../interfaces/formats.md#native) under corresponding settings.
The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type, plus some optional additional information.
`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.
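For orientation, `var_uint` is the usual LEB128-style scheme: seven bits of payload per byte, least-significant group first, with the high bit set on every byte except the last. A minimal sketch (the function name is illustrative, not the actual library API):

```cpp
#include <cstdint>
#include <vector>

// Encode a value as var_uint: 7 payload bits per byte, low groups first,
// high bit = "more bytes follow".
std::vector<uint8_t> encodeVarUInt(uint64_t value)
{
    std::vector<uint8_t> out;
    while (value >= 0x80)
    {
        out.push_back(static_cast<uint8_t>(value) | 0x80);
        value >>= 7;
    }
    out.push_back(static_cast<uint8_t>(value));
    return out;
}
// encodeVarUInt(1)   == {0x01}
// encodeVarUInt(300) == {0xAC, 0x02}
```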
| ClickHouse data type | Binary encoding |
|--------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `Nothing` | `0x00` |
| `UInt8` | `0x01` |
| `UInt16` | `0x02` |
| `UInt32` | `0x03` |
| `UInt64` | `0x04` |
| `UInt128` | `0x05` |
| `UInt256` | `0x06` |
| `Int8` | `0x07` |
| `Int16` | `0x08` |
| `Int32` | `0x09` |
| `Int64` | `0x0A` |
| `Int128` | `0x0B` |
| `Int256` | `0x0C` |
| `Float32` | `0x0D` |
| `Float64` | `0x0E` |
| `Date` | `0x0F` |
| `Date32` | `0x10` |
| `DateTime` | `0x11` |
| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
| `DateTime64(P)` | `0x13<uint8_precision>` |
| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
| `String` | `0x15` |
| `FixedString(N)` | `0x16<var_uint_size>` |
| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...<var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
| `UUID` | `0x1D` |
| `Array(T)` | `0x1E<nested_type_encoding>` |
| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
| `Set` | `0x21` |
| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
| `Nullable(T)` | `0x23<nested_type_encoding>` |
| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
| `IPv4` | `0x28` |
| `IPv6` | `0x29` |
| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
| `Bool` | `0x2D` |
| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
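Composite types are encoded by simple concatenation of the rows above; a few hand-worked examples:

```text
Map(String, UInt64)     -> 0x27 0x15 0x04
Nullable(Array(Int32))  -> 0x23 0x1E 0x09
FixedString(16)         -> 0x16 0x10   (0x10 is the var_uint size 16)
```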
### Interval kind binary encoding
The table below describes how different interval kinds of `Interval` data type are encoded.
| Interval kind | Binary encoding |
|---------------|-----------------|
| `Nanosecond` | `0x00` |
| `Microsecond` | `0x01` |
| `Millisecond` | `0x02` |
| `Second` | `0x03` |
| `Minute` | `0x04` |
| `Hour` | `0x05` |
| `Day` | `0x06` |
| `Week` | `0x07` |
| `Month` | `0x08` |
| `Quarter` | `0x09` |
| `Year`        | `0x0A`          |
### Aggregate function parameter binary encoding
The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.
| Parameter type | Binary encoding |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `Null` | `0x00` |
| `UInt64` | `0x01<var_uint_value>` |
| `Int64` | `0x02<var_int_value>` |
| `UInt128` | `0x03<uint128_little_endian_value>` |
| `Int128` | `0x04<int128_little_endian_value>` |
| `UInt256` | `0x05<uint256_little_endian_value>` |
| `Int256` | `0x06<int256_little_endian_value>` |
| `Float64` | `0x07<float64_little_endian_value>` |
| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
| `Bool` | `0x13<bool_value>` |
| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
| `Negative infinity` | `0xFE` |
| `Positive infinity` | `0xFF` |
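A few hand-worked examples derived from the rows above (assuming `bool_value` is a single `0x00`/`0x01` byte):

```text
String 'abc' -> 0x0C 0x03 0x61 0x62 0x63   (type, var_uint size, raw bytes)
UInt64 42    -> 0x01 0x2A                  (type, var_uint value)
Bool true    -> 0x13 0x01
```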


@ -493,3 +493,14 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
```
As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and cast all other types to `String`.
### Binary output format
In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
```text
<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
```
See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md) for how `<binary_encoded_data_type>` is encoded.
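For instance, a `Dynamic` cell currently holding the `UInt64` value `42` would be written as the one-byte type tag followed by the value in its usual RowBinary layout (a hand-worked sketch, assuming the standard fixed-width little-endian encoding for `UInt64`):

```text
0x04 0x2A 0x00 0x00 0x00 0x00 0x00 0x00 0x00
(UInt64 tag, then 42 as an 8-byte little-endian integer)
```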


@ -5,11 +5,11 @@ sidebar_label: Object Data Type
keywords: [object, data type]
---
# Object Data Type
# Object Data Type (deprecated)
:::note
This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864)
:::
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />
Stores JavaScript Object Notation (JSON) documents in a single column.


@ -7,7 +7,7 @@ sidebar_label: Tuples
## tuple
A function that allows grouping multiple columns.
For columns with the types T1, T2, ..., it returns a Tuple(T1, T2, ...) type tuple containing these columns. There is no cost to execute the function.
For columns C1, C2, ... with the types T1, T2, ..., it returns a named Tuple(C1 T1, C2 T2, ...) type tuple containing these columns if their names are unique and can be treated as unquoted identifiers, otherwise a Tuple(T1, T2, ...) is returned. There is no cost to execute the function.
Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can't be written to a table.
The function implements the operator `(x, y, ...)`.
@ -259,6 +259,60 @@ Result:
└───────────────────────────────────────┘
```
## tupleNames
Converts a tuple into an array of column names. For a tuple in the form `Tuple(a T, b T, ...)`, it returns an array of strings representing the named columns of the tuple. If the tuple elements do not have explicit names, their indices will be used as the column names instead.
**Syntax**
``` sql
tupleNames(tuple)
```
**Arguments**
- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.
**Returned value**
- An array with strings.
Type: [Array](../../sql-reference/data-types/array.md)([Tuple](../../sql-reference/data-types/tuple.md)([String](../../sql-reference/data-types/string.md), ...)).
**Example**
Query:
``` sql
CREATE TABLE tupletest (col Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;
INSERT INTO tupletest VALUES (tuple(1, 2));
SELECT tupleNames(col) FROM tupletest;
```
Result:
``` text
┌─tupleNames(col)──────────┐
│ ['user_ID','session_ID'] │
└──────────────────────────┘
```
If you pass a simple tuple to the function, ClickHouse uses the indexes of the columns as their names:
``` sql
SELECT tupleNames(tuple(3, 2, 1));
```
Result:
``` text
┌─tupleNames((3, 2, 1))─┐
│ ['1','2','3'] │
└───────────────────────┘
```
## tuplePlus
Calculates the sum of corresponding values of two tuples of the same size.


@ -58,6 +58,8 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
KILL QUERY WHERE user='username' SYNC
```
:::tip
If you are killing a query in ClickHouse Cloud or in a self-managed cluster, then be sure to use the `ON CLUSTER [cluster-name]` option, in order to ensure the query is killed on all replicas.
:::
Read-only users can only stop their own queries.
By default, the asynchronous version of queries is used (`ASYNC`), which does not wait for confirmation that queries have stopped.
@ -131,6 +133,7 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
-- Cancel the specific mutation:
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
```
:::tip
If you are killing a mutation in ClickHouse Cloud or in a self-managed cluster, then be sure to use the `ON CLUSTER [cluster-name]` option, in order to ensure the mutation is killed on all replicas.
:::
The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).


@ -130,7 +130,9 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv');
## Globs in path
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. There is one exception: if the path refers to an existing
directory and does not use globs, a `*` is implicitly appended to the path so
all the files in the directory are selected.
- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.
@ -163,6 +165,12 @@ An alternative path expression which achieves the same:
SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32');
```
Query the total number of rows in `some_dir` using the implicit `*`:
```sql
SELECT count(*) FROM file('some_dir', 'TSV', 'name String, value UInt32');
```
:::note
If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::


@ -12,7 +12,7 @@ sidebar_label: "Distinctive Features of ClickHouse"
This point had to be singled out, since there are systems that can store the values of individual columns separately but cannot execute analytical queries efficiently, because they are optimized for a different workload. Examples: HBase, BigTable, Cassandra, HyperTable. In these systems you get throughput of around hundreds of thousands of rows per second, but not hundreds of millions of rows per second.
It is also worth noting that ClickHouse is a database management system, not a system for a single database. That is, ClickHouse allows you to create tables and databases at runtime, load data, and run queries without reconfiguring and restarting the server.
## Data Compression {#szhatie-dannykh}


@ -1117,6 +1117,7 @@ void Client::processOptions(const OptionsDescription & options_description,
if (!options["user"].defaulted())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User and JWT flags can't be specified together");
config().setString("jwt", options["jwt"].as<std::string>());
config().setString("user", "");
}
if (options.count("accept-invalid-certificate"))
{


@ -10,6 +10,7 @@
#include <Analyzer/Utils.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h>
namespace DB
{
@ -26,12 +27,103 @@ static constexpr std::array boolean_functions{
"like"sv, "notLike"sv, "ilike"sv, "notILike"sv, "empty"sv, "notEmpty"sv, "not"sv, "and"sv,
"or"sv};
static bool isBooleanFunction(const String & func_name)
bool isBooleanFunction(const String & func_name)
{
return std::any_of(
boolean_functions.begin(), boolean_functions.end(), [&](const auto boolean_func) { return func_name == boolean_func; });
}
bool isNodeFunction(const QueryTreeNodePtr & node, const String & func_name)
{
if (const auto * function_node = node->as<FunctionNode>())
return function_node->getFunctionName() == func_name;
return false;
}
QueryTreeNodePtr getFunctionArgument(const QueryTreeNodePtr & node, size_t idx)
{
if (const auto * function_node = node->as<FunctionNode>())
{
const auto & args = function_node->getArguments().getNodes();
if (idx < args.size())
return args[idx];
}
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected '{}' to be a function with at least {} arguments", node->formatASTForErrorMessage(), idx + 1);
}
QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes)
{
for (const auto & node : nodes)
{
const auto * function_node = node->as<FunctionNode>();
if (function_node && function_node->getFunctionName() == "equals" &&
function_node->getArguments().getNodes().size() == 2)
{
return node;
}
}
return nullptr;
}
bool isBooleanConstant(const QueryTreeNodePtr & node, bool expected_value)
{
const auto * constant_node = node->as<ConstantNode>();
if (!constant_node || !constant_node->getResultType()->equals(DataTypeUInt8()))
return false;
UInt64 constant_value;
return (constant_node->getValue().tryGet<UInt64>(constant_value) && constant_value == expected_value);
}
/// Returns true if the expression consists only of conjunctions of functions with the specified name or of `true` constants
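/// (illustration) with func_name = "isNotNull" and allowed arguments {a, b}:
/// `isNotNull(a) AND (isNotNull(b) AND 1)` qualifies, while `isNotNull(c)` or `isNull(a)` does not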
bool isOnlyConjunctionOfFunctions(
const QueryTreeNodePtr & node,
const String & func_name,
const QueryTreeNodePtrWithHashSet & allowed_arguments)
{
if (isBooleanConstant(node, true))
return true;
const auto * node_function = node->as<FunctionNode>();
if (!node_function)
return false;
if (node_function->getFunctionName() == func_name
&& allowed_arguments.contains(node_function->getArgumentsNode()))
return true;
if (node_function->getFunctionName() == "and")
{
for (const auto & and_argument : node_function->getArguments().getNodes())
{
if (!isOnlyConjunctionOfFunctions(and_argument, func_name, allowed_arguments))
return false;
}
return true;
}
return false;
}
/// We can rewrite to a <=> b only if we are joining on a and b,
/// because the function is not yet implemented for other cases.
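/// E.g. for `t1 JOIN t2 ON ...`: `t1.x = t2.y` takes arguments from different sides, while `t1.x = t1.y` does not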
bool isTwoArgumentsFromDifferentSides(const FunctionNode & node_function, const JoinNode & join_node)
{
const auto & argument_nodes = node_function.getArguments().getNodes();
if (argument_nodes.size() != 2)
return false;
auto first_src = getExpressionSource(argument_nodes[0]);
auto second_src = getExpressionSource(argument_nodes[1]);
if (!first_src || !second_src)
return false;
const auto & lhs_join = *join_node.getLeftTableExpression();
const auto & rhs_join = *join_node.getRightTableExpression();
return (first_src->isEqual(lhs_join) && second_src->isEqual(rhs_join)) ||
(first_src->isEqual(rhs_join) && second_src->isEqual(lhs_join));
}
/// Visitor that optimizes logical expressions _only_ in JOIN ON section
class JoinOnLogicalExpressionOptimizerVisitor : public InDepthQueryTreeVisitorWithContext<JoinOnLogicalExpressionOptimizerVisitor>
{
@ -47,15 +139,16 @@ public:
{
auto * function_node = node->as<FunctionNode>();
if (!function_node)
return;
QueryTreeNodePtr new_node = nullptr;
if (function_node && function_node->getFunctionName() == "or")
new_node = tryOptimizeJoinOnNulls(function_node->getArguments().getNodes(), getContext());
else
new_node = tryOptimizeJoinOnNulls({node}, getContext());
if (function_node->getFunctionName() == "or")
if (new_node)
{
bool is_argument_type_changed = tryOptimizeIsNotDistinctOrIsNull(node, getContext());
if (is_argument_type_changed)
need_rerun_resolve = true;
return;
need_rerun_resolve |= !new_node->getResultType()->equals(*node->getResultType());
node = new_node;
}
}
@ -72,15 +165,11 @@ private:
const JoinNode * join_node;
bool need_rerun_resolve = false;
/// Returns true if type of some operand is changed and parent function needs to be re-resolved
bool tryOptimizeIsNotDistinctOrIsNull(QueryTreeNodePtr & node, const ContextPtr & context)
/// Returns the optimized node or nullptr if nothing has been changed
QueryTreeNodePtr tryOptimizeJoinOnNulls(const QueryTreeNodes & nodes, const ContextPtr & context)
{
auto & function_node = node->as<FunctionNode &>();
chassert(function_node.getFunctionName() == "or");
QueryTreeNodes or_operands;
or_operands.reserve(function_node.getArguments().getNodes().size());
or_operands.reserve(nodes.size());
/// Indices of `equals` or `isNotDistinctFrom` functions in the vector above
std::vector<size_t> equals_functions_indices;
@ -93,47 +182,73 @@ private:
* b => [(a IS NULL AND b IS NULL)]
* c => [(a IS NULL AND c IS NULL)]
* }
* Then for each a <=> b we can find all operands that contains both a IS NULL and b IS NULL
* Then for each equality a = b we can check if we have operand (a IS NULL AND b IS NULL)
*/
QueryTreeNodePtrWithHashMap<std::vector<size_t>> is_null_argument_to_indices;
for (const auto & argument : function_node.getArguments())
{
or_operands.push_back(argument);
bool is_anything_changed = false;
auto * argument_function = argument->as<FunctionNode>();
for (const auto & node : nodes)
{
if (isBooleanConstant(node, false))
{
/// Remove false constants from OR
is_anything_changed = true;
continue;
}
or_operands.push_back(node);
auto * argument_function = node->as<FunctionNode>();
if (!argument_function)
continue;
const auto & func_name = argument_function->getFunctionName();
if (func_name == "equals" || func_name == "isNotDistinctFrom")
{
const auto & argument_nodes = argument_function->getArguments().getNodes();
if (argument_nodes.size() != 2)
continue;
/// We can rewrite to a <=> b only if we are joining on a and b,
/// because the function is not yet implemented for other cases.
auto first_src = getExpressionSource(argument_nodes[0]);
auto second_src = getExpressionSource(argument_nodes[1]);
if (!first_src || !second_src)
continue;
const auto & lhs_join = *join_node->getLeftTableExpression();
const auto & rhs_join = *join_node->getRightTableExpression();
bool arguments_from_both_sides = (first_src->isEqual(lhs_join) && second_src->isEqual(rhs_join)) ||
(first_src->isEqual(rhs_join) && second_src->isEqual(lhs_join));
if (!arguments_from_both_sides)
continue;
equals_functions_indices.push_back(or_operands.size() - 1);
if (isTwoArgumentsFromDifferentSides(*argument_function, *join_node))
equals_functions_indices.push_back(or_operands.size() - 1);
}
else if (func_name == "and")
{
for (const auto & and_argument : argument_function->getArguments().getNodes())
const auto & and_arguments = argument_function->getArguments().getNodes();
bool all_are_is_null = and_arguments.size() == 2 && isNodeFunction(and_arguments[0], "isNull") && isNodeFunction(and_arguments[1], "isNull");
if (all_are_is_null)
{
auto * and_argument_function = and_argument->as<FunctionNode>();
if (and_argument_function && and_argument_function->getFunctionName() == "isNull")
is_null_argument_to_indices[getFunctionArgument(and_arguments.front(), 0)].push_back(or_operands.size() - 1);
is_null_argument_to_indices[getFunctionArgument(and_arguments.back(), 0)].push_back(or_operands.size() - 1);
}
/// The expression `a = b AND (a IS NOT NULL) AND true AND (b IS NOT NULL)` can be replaced with `a = b`
/// Even though these expressions are not equivalent (the first is NULL on NULLs, while the second is FALSE),
/// it is still correct, since in a JOIN ON condition NULL is treated as FALSE
if (const auto & equals_function = findEqualsFunction(and_arguments))
{
const auto & equals_arguments = equals_function->as<FunctionNode>()->getArguments().getNodes();
/// Expected isNotNull arguments
QueryTreeNodePtrWithHashSet allowed_arguments;
allowed_arguments.insert(QueryTreeNodePtrWithHash(std::make_shared<ListNode>(QueryTreeNodes{equals_arguments[0]})));
allowed_arguments.insert(QueryTreeNodePtrWithHash(std::make_shared<ListNode>(QueryTreeNodes{equals_arguments[1]})));
bool can_be_optimized = true;
for (const auto & and_argument : and_arguments)
{
const auto & is_null_argument = and_argument_function->getArguments().getNodes()[0];
is_null_argument_to_indices[is_null_argument].push_back(or_operands.size() - 1);
if (and_argument.get() == equals_function.get())
continue;
if (isOnlyConjunctionOfFunctions(and_argument, "isNotNull", allowed_arguments))
continue;
can_be_optimized = false;
break;
}
if (can_be_optimized)
{
is_anything_changed = true;
or_operands.pop_back();
or_operands.push_back(equals_function);
if (isTwoArgumentsFromDifferentSides(equals_function->as<FunctionNode &>(), *join_node))
equals_functions_indices.push_back(or_operands.size() - 1);
}
}
}
@ -144,9 +259,9 @@ private:
for (size_t equals_function_idx : equals_functions_indices)
{
auto * equals_function = or_operands[equals_function_idx]->as<FunctionNode>();
const auto * equals_function = or_operands[equals_function_idx]->as<FunctionNode>();
/// For a <=> b we are looking for expressions containing both `a IS NULL` and `b IS NULL` combined with AND
/// For a = b we are looking for all expressions `a IS NULL AND b IS NULL`
const auto & argument_nodes = equals_function->getArguments().getNodes();
const auto & lhs_is_null_parents = is_null_argument_to_indices[argument_nodes[0]];
const auto & rhs_is_null_parents = is_null_argument_to_indices[argument_nodes[1]];
@ -161,60 +276,40 @@ private:
for (size_t to_optimize_idx : operands_to_optimize)
{
/// We are looking for operand `a IS NULL AND b IS NULL AND ...`
auto * operand_to_optimize = or_operands[to_optimize_idx]->as<FunctionNode>();
/// Remove `a IS NULL` and `b IS NULL` arguments from AND
QueryTreeNodes new_arguments;
for (const auto & and_argument : operand_to_optimize->getArguments().getNodes())
{
bool to_eliminate = false;
const auto * and_argument_function = and_argument->as<FunctionNode>();
if (and_argument_function && and_argument_function->getFunctionName() == "isNull")
{
const auto & is_null_argument = and_argument_function->getArguments().getNodes()[0];
to_eliminate = (is_null_argument->isEqual(*argument_nodes[0]) || is_null_argument->isEqual(*argument_nodes[1]));
}
if (to_eliminate)
arguments_to_reresolve.insert(to_optimize_idx);
else
new_arguments.emplace_back(and_argument);
}
/// If less than two arguments left, we will remove or replace the whole AND below
operand_to_optimize->getArguments().getNodes() = std::move(new_arguments);
/// Remove `a IS NULL AND b IS NULL`
or_operands[to_optimize_idx] = nullptr;
is_anything_changed = true;
}
}
if (arguments_to_reresolve.empty())
if (arguments_to_reresolve.empty() && !is_anything_changed)
/// Nothing has been changed
return false;
return nullptr;
auto and_function_resolver = FunctionFactory::instance().get("and", context);
auto strict_equals_function_resolver = FunctionFactory::instance().get("isNotDistinctFrom", context);
bool need_reresolve = false;
QueryTreeNodes new_or_operands;
for (size_t i = 0; i < or_operands.size(); ++i)
{
if (arguments_to_reresolve.contains(i))
{
auto * function = or_operands[i]->as<FunctionNode>();
const auto * function = or_operands[i]->as<FunctionNode>();
if (function->getFunctionName() == "equals")
{
/// We should replace `a = b` with `a <=> b` because we removed checks for IS NULL
need_reresolve |= function->getResultType()->isNullable();
function->resolveAsFunction(strict_equals_function_resolver);
new_or_operands.emplace_back(std::move(or_operands[i]));
auto new_function = or_operands[i]->clone();
new_function->as<FunctionNode>()->resolveAsFunction(strict_equals_function_resolver);
new_or_operands.emplace_back(std::move(new_function));
}
else if (function->getFunctionName() == "and")
{
const auto & and_arguments = function->getArguments().getNodes();
if (and_arguments.size() > 1)
{
function->resolveAsFunction(and_function_resolver);
new_or_operands.emplace_back(std::move(or_operands[i]));
auto new_function = or_operands[i]->clone();
new_function->as<FunctionNode>()->resolveAsFunction(and_function_resolver);
new_or_operands.emplace_back(std::move(new_function));
}
else if (and_arguments.size() == 1)
{
@ -223,25 +318,26 @@ private:
}
}
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected function name: '{}'", function->getFunctionName());
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected function '{}'", function->getFunctionName());
}
else
else if (or_operands[i])
{
new_or_operands.emplace_back(std::move(or_operands[i]));
}
}
if (new_or_operands.empty())
return nullptr;
if (new_or_operands.size() == 1)
{
node = std::move(new_or_operands[0]);
return need_reresolve;
}
return new_or_operands[0];
/// Rebuild OR function
auto or_function_resolver = FunctionFactory::instance().get("or", context);
function_node.getArguments().getNodes() = std::move(new_or_operands);
function_node.resolveAsFunction(or_function_resolver);
return need_reresolve;
auto function_node = std::make_shared<FunctionNode>("or");
function_node->getArguments().getNodes() = std::move(new_or_operands);
function_node->resolveAsFunction(or_function_resolver);
return function_node;
}
};


@ -940,6 +940,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildJoinTree(const ASTPtr & tables_in_select
table_join.locality,
result_join_strictness,
result_join_kind);
join_node->setOriginalAST(table_element.table_join);
/** Original AST is not set because it will contain only join part and does
* not include left table expression.


@ -3,6 +3,8 @@
#include <Backups/BackupStatus.h>
#include <Common/ProfileEvents.h>
#include <exception>
namespace DB
{


@ -8,6 +8,7 @@ namespace ProfileEvents
extern const Event DistributedConnectionUsable;
extern const Event DistributedConnectionMissingTable;
extern const Event DistributedConnectionStaleReplica;
extern const Event DistributedConnectionFailTry;
}
namespace DB
@ -97,6 +98,8 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
}
catch (const Exception & e)
{
ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
if (e.code() != ErrorCodes::NETWORK_ERROR && e.code() != ErrorCodes::SOCKET_TIMEOUT
&& e.code() != ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF && e.code() != ErrorCodes::DNS_ERROR)
throw;

View File

@ -7,7 +7,6 @@
namespace ProfileEvents
{
extern const Event HedgedRequestsChangeReplica;
extern const Event DistributedConnectionFailTry;
extern const Event DistributedConnectionFailAtAll;
}
@ -327,7 +326,6 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
{
ShuffledPool & shuffled_pool = shuffled_pools[index];
LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
shuffled_pool.error_count = std::min(pool->getMaxErrorCap(), shuffled_pool.error_count + 1);
shuffled_pool.slowdown_count = 0;

View File

@ -267,7 +267,11 @@ bool ColumnAggregateFunction::structureEquals(const IColumn & to) const
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start, size_t length)
#else
void ColumnAggregateFunction::doInsertRangeFrom(const IColumn & from, size_t start, size_t length)
#endif
{
const ColumnAggregateFunction & from_concrete = assert_cast<const ColumnAggregateFunction &>(from);
@ -462,7 +466,11 @@ void ColumnAggregateFunction::insertFromWithOwnership(const IColumn & from, size
insertMergeFrom(from, n);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
#else
void ColumnAggregateFunction::doInsertFrom(const IColumn & from, size_t n)
#endif
{
insertRangeFrom(from, n, 1);
}
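The preprocessor pair above is the template for the rest of this commit: every IColumn override in the column files that follow gets the same guard, keeping the original virtual name in release builds and renaming the override to a do* method under ABORT_ON_LOGICAL_ERROR so the base class can wrap it with a type check (a self-contained sketch of that wrapper appears after the IColumn.h hunk later in this diff).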

View File

@ -145,7 +145,14 @@ public:
void insertData(const char * pos, size_t length) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & from, size_t n) override;
#else
using IColumn::insertFrom;
void doInsertFrom(const IColumn & from, size_t n) override;
#endif
void insertFrom(ConstAggregateDataPtr place);
@ -182,7 +189,11 @@ public:
void protect() override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & from, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & from, size_t start, size_t length) override;
#endif
void popBack(size_t n) override;
@ -201,7 +212,11 @@ public:
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override
#else
int doCompareAt(size_t, size_t, const IColumn &, int) const override
#endif
{
return 0;
}

View File

@ -337,7 +337,11 @@ bool ColumnArray::tryInsert(const Field & x)
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnArray::insertFrom(const IColumn & src_, size_t n)
#else
void ColumnArray::doInsertFrom(const IColumn & src_, size_t n)
#endif
{
const ColumnArray & src = assert_cast<const ColumnArray &>(src_);
size_t size = src.sizeAt(n);
@ -392,7 +396,11 @@ int ColumnArray::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan
: 1);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
#else
int ColumnArray::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
#endif
{
return compareAtImpl(n, m, rhs_, nan_direction_hint);
}
@ -535,7 +543,11 @@ void ColumnArray::getExtremes(Field & min, Field & max) const
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnArray::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
if (length == 0)
return;

View File

@ -84,10 +84,18 @@ public:
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
#else
void doInsertFrom(const IColumn & src_, size_t n) override;
#endif
void insertDefault() override;
void popBack(size_t n) override;
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
@ -95,7 +103,11 @@ public:
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
template <typename Type> ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#endif
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator & collator) const override;
void getPermutation(PermutationSortDirection direction, PermutationSortStability stability,
size_t limit, int nan_direction_hint, Permutation & res) const override;

View File

@ -85,7 +85,11 @@ public:
bool isDefaultAt(size_t) const override { throwMustBeDecompressed(); }
void insert(const Field &) override { throwMustBeDecompressed(); }
bool tryInsert(const Field &) override { throwMustBeDecompressed(); }
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
#else
void doInsertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
#endif
void insertData(const char *, size_t) override { throwMustBeDecompressed(); }
void insertDefault() override { throwMustBeDecompressed(); }
void popBack(size_t) override { throwMustBeDecompressed(); }
@ -100,7 +104,11 @@ public:
void expand(const Filter &, bool) override { throwMustBeDecompressed(); }
ColumnPtr permute(const Permutation &, size_t) const override { throwMustBeDecompressed(); }
ColumnPtr index(const IColumn &, size_t) const override { throwMustBeDecompressed(); }
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
#else
int doCompareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
#endif
void compareColumn(const IColumn &, size_t, PaddedPODArray<UInt64> *, PaddedPODArray<Int8> &, int, int) const override
{
throwMustBeDecompressed();

View File

@ -32,6 +32,8 @@ private:
ColumnConst(const ColumnConst & src) = default;
public:
bool isConst() const override { return true; }
ColumnPtr convertToFullColumn() const;
ColumnPtr convertToFullColumnIfConst() const override
@ -121,7 +123,11 @@ public:
return data->isNullAt(0);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
#else
void doInsertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
#endif
{
s += length;
}
@ -145,12 +151,20 @@ public:
++s;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn &, size_t) override
#else
void doInsertFrom(const IColumn &, size_t) override
#endif
{
++s;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
#else
void doInsertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
#endif
void insertDefault() override
{
@ -223,7 +237,11 @@ public:
return data->allocatedBytes() + sizeof(s);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
#else
int doCompareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
#endif
{
return data->compareAt(0, 0, *assert_cast<const ColumnConst &>(rhs).data, nan_direction_hint);
}

View File

@ -32,7 +32,11 @@ namespace ErrorCodes
}
template <is_decimal T>
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnDecimal<T>::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const
#else
int ColumnDecimal<T>::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int) const
#endif
{
auto & other = static_cast<const Self &>(rhs_);
const T & a = data[n];
@ -331,7 +335,11 @@ void ColumnDecimal<T>::insertData(const char * src, size_t /*length*/)
}
template <is_decimal T>
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnDecimal<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnDecimal<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const ColumnDecimal & src_vec = assert_cast<const ColumnDecimal &>(src);

View File

@ -55,9 +55,17 @@ public:
void reserve(size_t n) override { data.reserve_exact(n); }
void shrinkToFit() override { data.shrink_to_fit(); }
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
#else
void doInsertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
#endif
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override
#else
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override
#endif
{
ValueType v = assert_cast<const Self &>(src).getData()[position];
data.resize_fill(data.size() + length, v);
@ -68,7 +76,11 @@ public:
void insertManyDefaults(size_t length) override { data.resize_fill(data.size() + length); }
void insert(const Field & x) override { data.push_back(x.get<T>()); }
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void popBack(size_t n) override
{
@ -92,7 +104,11 @@ public:
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
#endif
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,

View File

@ -4,7 +4,9 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/FieldToDataType.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Processors/Transforms/ColumnGathererTransform.h>
@ -213,7 +215,11 @@ bool ColumnDynamic::tryInsert(const DB::Field & x)
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n)
#else
void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n)
#endif
{
const auto & dynamic_src = assert_cast<const ColumnDynamic &>(src_);
@ -263,7 +269,11 @@ void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n)
variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
#else
void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
#endif
{
if (start + length > src_.size())
throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Parameter out of bound in ColumnDynamic::insertRangeFrom method. "
@ -429,7 +439,11 @@ void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size
}
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
#else
void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
#endif
{
const auto & dynamic_src = assert_cast<const ColumnDynamic &>(src_);
@ -481,7 +495,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co
/// We cannot use Variant serialization here as it serializes discriminator + value,
/// but Dynamic doesn't have fixed mapping discriminator <-> variant type
/// as different Dynamic column can have different Variants.
/// Instead, we serialize null bit + variant type name (size + bytes) + value.
/// Instead, we serialize null bit + variant type in binary format (size + bytes) + value.
const auto & variant_col = assert_cast<const ColumnVariant &>(*variant_column);
auto discr = variant_col.globalDiscriminatorAt(n);
StringRef res;
@ -495,14 +509,15 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co
return res;
}
const auto & variant_name = variant_info.variant_names[discr];
size_t variant_name_size = variant_name.size();
char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_name.size(), begin);
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(discr);
String variant_type_binary_data = encodeDataType(variant_type);
size_t variant_type_binary_data_size = variant_type_binary_data.size();
char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(), begin);
memcpy(pos, &null_bit, sizeof(UInt8));
memcpy(pos + sizeof(UInt8), &variant_name_size, sizeof(size_t));
memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_name.data(), variant_name.size());
memcpy(pos + sizeof(UInt8), &variant_type_binary_data_size, sizeof(size_t));
memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_type_binary_data.data(), variant_type_binary_data.size());
res.data = pos;
res.size = sizeof(UInt8) + sizeof(size_t) + variant_name.size();
res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size();
auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin);
res.data = value_ref.data - res.size;
@ -521,13 +536,15 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)
return pos;
}
/// Read variant type name.
const size_t variant_name_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_name_size);
String variant_name;
variant_name.resize(variant_name_size);
memcpy(variant_name.data(), pos, variant_name_size);
pos += variant_name_size;
/// Read variant type in binary format.
const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_type_binary_data_size);
String variant_type_binary_data;
variant_type_binary_data.resize(variant_type_binary_data_size);
memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
pos += variant_type_binary_data_size;
auto variant_type = decodeDataType(variant_type_binary_data);
auto variant_name = variant_type->getName();
/// If we already have such variant, just deserialize it into corresponding variant column.
auto it = variant_info.variant_name_to_discriminator.find(variant_name);
if (it != variant_info.variant_name_to_discriminator.end())
@ -537,7 +554,6 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)
}
/// If we don't have such variant, add it.
auto variant_type = DataTypeFactory::instance().get(variant_name);
if (likely(addNewVariant(variant_type)))
{
auto discr = variant_info.variant_name_to_discriminator[variant_name];
@ -563,13 +579,13 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const
if (null_bit)
return pos;
const size_t variant_name_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_name_size);
String variant_name;
variant_name.resize(variant_name_size);
memcpy(variant_name.data(), pos, variant_name_size);
pos += variant_name_size;
auto tmp_variant_column = DataTypeFactory::instance().get(variant_name)->createColumn();
const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_type_binary_data_size);
String variant_type_binary_data;
variant_type_binary_data.resize(variant_type_binary_data_size);
memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
pos += variant_type_binary_data_size;
auto tmp_variant_column = decodeDataType(variant_type_binary_data)->createColumn();
return tmp_variant_column->skipSerializedInArena(pos);
}
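Taken together, the three hunks above switch the arena format from a type-name string to the binary type encoding. A layout sketch of a non-null Dynamic value as serialized after this change, inferred from serializeValueIntoArena above:

UInt8   null_bit                         -- 0 for non-null values
size_t  variant_type_binary_data_size    -- length of the encoded type
bytes   encodeDataType(variant_type)     -- binary type encoding, replacing the former type name
bytes   value                            -- the variant column's own serialization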
@ -587,7 +603,11 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const
variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
#else
int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
#endif
{
const auto & left_variant = assert_cast<const ColumnVariant &>(*variant_column);
const auto & right_dynamic = assert_cast<const ColumnDynamic &>(rhs);

View File

@ -142,9 +142,16 @@ public:
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
#else
void doInsertFrom(const IColumn & src_, size_t n) override;
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
#endif
void insertDefault() override
{
@ -213,7 +220,11 @@ public:
return scattered_columns;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
bool hasEqualValues() const override
{

View File

@ -74,7 +74,11 @@ bool ColumnFixedString::tryInsert(const Field & x)
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnFixedString::insertFrom(const IColumn & src_, size_t index)
#else
void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index)
#endif
{
const ColumnFixedString & src = assert_cast<const ColumnFixedString &>(src_);
@ -86,7 +90,11 @@ void ColumnFixedString::insertFrom(const IColumn & src_, size_t index)
memcpySmallAllowReadWriteOverflow15(chars.data() + old_size, &src.chars[n * index], n);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnFixedString::insertManyFrom(const IColumn & src, size_t position, size_t length)
#else
void ColumnFixedString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
#endif
{
const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
if (n != src_concrete.getN())
@ -219,7 +227,11 @@ size_t ColumnFixedString::estimateCardinalityInPermutedRange(const Permutation &
return elements.size();
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnFixedString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
chassert(this->n == src_concrete.n);

View File

@ -98,9 +98,17 @@ public:
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t index) override;
#else
void doInsertFrom(const IColumn & src_, size_t index) override;
#endif
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
#else
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
#endif
void insertData(const char * pos, size_t length) override;
@ -129,7 +137,11 @@ public:
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
#else
int doCompareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
#endif
{
const ColumnFixedString & rhs = assert_cast<const ColumnFixedString &>(rhs_);
chassert(this->n == rhs.n);
@ -144,7 +156,11 @@ public:
size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;

View File

@ -72,7 +72,11 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnFunction::insertFrom(const IColumn & src, size_t n)
#else
void ColumnFunction::doInsertFrom(const IColumn & src, size_t n)
#endif
{
const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);
@ -89,7 +93,11 @@ void ColumnFunction::insertFrom(const IColumn & src, size_t n)
++elements_size;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnFunction::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnFunction::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);

View File

@ -94,8 +94,16 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot insert into {}", getName());
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
#else
void doInsertFrom(const IColumn & src, size_t n) override;
#endif
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn &, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn &, size_t start, size_t length) override;
#endif
void insertData(const char *, size_t) override
{
@ -137,7 +145,11 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "popBack is not implemented for {}", getName());
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override
#else
int doCompareAt(size_t, size_t, const IColumn &, int) const override
#endif
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "compareAt is not implemented for {}", getName());
}

View File

@ -159,7 +159,11 @@ void ColumnLowCardinality::insertDefault()
idx.insertPosition(getDictionary().getDefaultValueIndex());
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n)
#else
void ColumnLowCardinality::doInsertFrom(const IColumn & src, size_t n)
#endif
{
const auto * low_cardinality_src = typeid_cast<const ColumnLowCardinality *>(&src);
@ -187,7 +191,11 @@ void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n)
idx.insertPosition(getDictionary().uniqueInsertFrom(src, n));
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnLowCardinality::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const auto * low_cardinality_src = typeid_cast<const ColumnLowCardinality *>(&src);
@ -364,7 +372,11 @@ int ColumnLowCardinality::compareAtImpl(size_t n, size_t m, const IColumn & rhs,
return getDictionary().compareAt(n_index, m_index, low_cardinality_column.getDictionary(), nan_direction_hint);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#else
int ColumnLowCardinality::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#endif
{
return compareAtImpl(n, m, rhs, nan_direction_hint);
}

View File

@ -78,10 +78,18 @@ public:
bool tryInsert(const Field & x) override;
void insertDefault() override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
#else
void doInsertFrom(const IColumn & src, size_t n) override;
#endif
void insertFromFullColumn(const IColumn & src, size_t n);
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void insertRangeFromFullColumn(const IColumn & src, size_t start, size_t length);
void insertRangeFromDictionaryEncodedColumn(const IColumn & keys, const IColumn & positions);
@ -127,7 +135,11 @@ public:
return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().index(indexes_, limit));
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator &) const override;

View File

@ -153,17 +153,29 @@ void ColumnMap::updateHashFast(SipHash & hash) const
nested->updateHashFast(hash);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnMap::insertFrom(const IColumn & src, size_t n)
#else
void ColumnMap::doInsertFrom(const IColumn & src, size_t n)
#endif
{
nested->insertFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), n);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnMap::insertManyFrom(const IColumn & src, size_t position, size_t length)
#else
void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
#endif
{
assert_cast<ColumnArray &>(*nested).insertManyFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), position, length);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnMap::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnMap::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
nested->insertRangeFrom(
assert_cast<const ColumnMap &>(src).getNestedColumn(),
@ -210,7 +222,11 @@ MutableColumns ColumnMap::scatter(ColumnIndex num_columns, const Selector & sele
return res;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnMap::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#else
int ColumnMap::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#endif
{
const auto & rhs_map = assert_cast<const ColumnMap &>(rhs);
return nested->compareAt(n, m, rhs_map.getNestedColumn(), nan_direction_hint);

View File

@ -66,16 +66,28 @@ public:
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertFrom(const IColumn & src_, size_t n) override;
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
void expand(const Filter & mask, bool inverted) override;
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
ColumnPtr replicate(const Offsets & offsets) const override;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
void getExtremes(Field & min, Field & max) const override;
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;

View File

@ -221,7 +221,11 @@ const char * ColumnNullable::skipSerializedInArena(const char * pos) const
return pos;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnNullable::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnNullable::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const ColumnNullable & nullable_col = assert_cast<const ColumnNullable &>(src);
getNullMapColumn().insertRangeFrom(*nullable_col.null_map, start, length);
@ -258,7 +262,11 @@ bool ColumnNullable::tryInsert(const Field & x)
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnNullable::insertFrom(const IColumn & src, size_t n)
#else
void ColumnNullable::doInsertFrom(const IColumn & src, size_t n)
#endif
{
const ColumnNullable & src_concrete = assert_cast<const ColumnNullable &>(src);
getNestedColumn().insertFrom(src_concrete.getNestedColumn(), n);
@ -266,7 +274,11 @@ void ColumnNullable::insertFrom(const IColumn & src, size_t n)
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnNullable::insertManyFrom(const IColumn & src, size_t position, size_t length)
#else
void ColumnNullable::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
#endif
{
const ColumnNullable & src_concrete = assert_cast<const ColumnNullable &>(src);
getNestedColumn().insertManyFrom(src_concrete.getNestedColumn(), position, length);
@ -402,7 +414,11 @@ int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int
return getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
#else
int ColumnNullable::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
#endif
{
return compareAtImpl(n, m, rhs_, null_direction_hint);
}

View File

@ -69,11 +69,21 @@ public:
char * serializeValueIntoMemory(size_t n, char * memory) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char * pos) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
#else
void doInsertFrom(const IColumn & src, size_t n) override;
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
#endif
void insertFromNotNullable(const IColumn & src, size_t n);
void insertRangeFromNotNullable(const IColumn & src, size_t start, size_t length);
@ -90,7 +100,11 @@ public:
void expand(const Filter & mask, bool inverted) override;
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
#endif
#if USE_EMBEDDED_COMPILER

View File

@ -763,12 +763,20 @@ void ColumnObject::get(size_t n, Field & res) const
}
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnObject::insertFrom(const IColumn & src, size_t n)
#else
void ColumnObject::doInsertFrom(const IColumn & src, size_t n)
#endif
{
insert(src[n]);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const auto & src_object = assert_cast<const ColumnObject &>(src);

View File

@ -209,8 +209,15 @@ public:
void insert(const Field & field) override;
bool tryInsert(const Field & field) override;
void insertDefault() override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertFrom(const IColumn & src, size_t n) override;
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void popBack(size_t length) override;
Field operator[](size_t n) const override;
void get(size_t n, Field & res) const override;
@ -228,7 +235,11 @@ public:
/// Order of rows in ColumnObject is undefined.
void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override;
void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
#else
int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
#endif
void getExtremes(Field & min, Field & max) const override;
/// All other methods throw exception.

View File

@ -174,7 +174,11 @@ const char * ColumnSparse::skipSerializedInArena(const char * pos) const
return values->skipSerializedInArena(pos);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnSparse::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnSparse::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
if (length == 0)
return;
@ -248,7 +252,11 @@ bool ColumnSparse::tryInsert(const Field & x)
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnSparse::insertFrom(const IColumn & src, size_t n)
#else
void ColumnSparse::doInsertFrom(const IColumn & src, size_t n)
#endif
{
if (const auto * src_sparse = typeid_cast<const ColumnSparse *>(&src))
{
@ -446,7 +454,11 @@ ColumnPtr ColumnSparse::indexImpl(const PaddedPODArray<Type> & indexes, size_t l
return ColumnSparse::create(std::move(res_values), std::move(res_offsets), limit);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnSparse::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
#else
int ColumnSparse::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
#endif
{
if (const auto * rhs_sparse = typeid_cast<const ColumnSparse *>(&rhs_))
return values->compareAt(getValueIndex(n), rhs_sparse->getValueIndex(m), rhs_sparse->getValuesColumn(), null_direction_hint);

View File

@ -81,10 +81,18 @@ public:
char * serializeValueIntoMemory(size_t n, char * memory) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char *) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
#else
void doInsertFrom(const IColumn & src, size_t n) override;
#endif
void insertDefault() override;
void insertManyDefaults(size_t length) override;
@ -98,7 +106,11 @@ public:
template <typename Type>
ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
#endif
void compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const override;

View File

@ -39,7 +39,11 @@ ColumnString::ColumnString(const ColumnString & src)
last_offset, chars.size());
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnString::insertManyFrom(const IColumn & src, size_t position, size_t length)
#else
void ColumnString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
#endif
{
const ColumnString & src_concrete = assert_cast<const ColumnString &>(src);
const UInt8 * src_buf = &src_concrete.chars[src_concrete.offsets[position - 1]];
@ -129,7 +133,11 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
if (length == 0)
return;

View File

@ -142,7 +142,11 @@ public:
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override
#else
void doInsertFrom(const IColumn & src_, size_t n) override
#endif
{
const ColumnString & src = assert_cast<const ColumnString &>(src_);
const size_t size_to_append = src.offsets[n] - src.offsets[n - 1]; /// -1th index is Ok, see PaddedPODArray.
@ -165,7 +169,11 @@ public:
}
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
#else
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
#endif
void insertData(const char * pos, size_t length) override
{
@ -212,7 +220,11 @@ public:
hash.update(reinterpret_cast<const char *>(chars.data()), chars.size() * sizeof(chars[0]));
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
@ -238,7 +250,11 @@ public:
offsets.push_back(offsets.back() + 1);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
#endif
{
const ColumnString & rhs = assert_cast<const ColumnString &>(rhs_);
return memcmpSmallAllowOverflow15(chars.data() + offsetAt(n), sizeAt(n) - 1, rhs.chars.data() + rhs.offsetAt(m), rhs.sizeAt(m) - 1);

View File

@ -205,7 +205,11 @@ bool ColumnTuple::tryInsert(const Field & x)
return true;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
#else
void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n)
#endif
{
const ColumnTuple & src = assert_cast<const ColumnTuple &>(src_);
@ -218,7 +222,11 @@ void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
columns[i]->insertFrom(*src.columns[i], n);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertManyFrom(const IColumn & src, size_t position, size_t length)
#else
void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
#endif
{
const ColumnTuple & src_tuple = assert_cast<const ColumnTuple &>(src);
@ -318,7 +326,11 @@ void ColumnTuple::updateHashFast(SipHash & hash) const
column->updateHashFast(hash);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnTuple::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
column_length += length;
const size_t tuple_size = columns.size();
@ -470,7 +482,11 @@ int ColumnTuple::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_
return 0;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#else
int ColumnTuple::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#endif
{
return compareAtImpl(n, m, rhs, nan_direction_hint);
}

View File

@ -65,8 +65,15 @@ public:
void insertData(const char * pos, size_t length) override;
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
#else
void doInsertFrom(const IColumn & src_, size_t n) override;
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
#endif
void insertDefault() override;
void popBack(size_t n) override;
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
@ -76,14 +83,22 @@ public:
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
void expand(const Filter & mask, bool inverted) override;
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
ColumnPtr replicate(const Offsets & offsets) const override;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator & collator) const override;
void getExtremes(Field & min, Field & max) const override;
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,

View File

@ -90,7 +90,11 @@ public:
return getNestedColumn()->updateHashWithValue(n, hash_func);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
void getExtremes(Field & min, Field & max) const override { column_holder->getExtremes(min, max); }
bool valuesHaveFixedSize() const override { return column_holder->valuesHaveFixedSize(); }
@ -488,7 +492,11 @@ const char * ColumnUnique<ColumnType>::skipSerializedInArena(const char *) const
}
template <typename ColumnType>
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnUnique<ColumnType>::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#else
int ColumnUnique<ColumnType>::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#endif
{
if (is_nullable)
{

View File

@ -595,17 +595,29 @@ void ColumnVariant::insertManyFromImpl(const DB::IColumn & src_, size_t position
}
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertFrom(const IColumn & src_, size_t n)
#else
void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n)
#endif
{
insertFromImpl(src_, n, nullptr);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length)
#else
void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length)
#endif
{
insertRangeFromImpl(src_, start, length, nullptr);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
#else
void ColumnVariant::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
#endif
{
insertManyFromImpl(src_, position, length, nullptr);
}
@ -1174,7 +1186,11 @@ bool ColumnVariant::hasEqualValues() const
return local_discriminators->hasEqualValues() && variants[localDiscriminatorAt(0)]->hasEqualValues();
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnVariant::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#else
int ColumnVariant::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
#endif
{
const auto & rhs_variant = assert_cast<const ColumnVariant &>(rhs);
Discriminator left_discr = globalDiscriminatorAt(n);

View File

@ -180,9 +180,19 @@ public:
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertRangeFrom(const IColumn & src_, size_t start, size_t length) override;
void insertManyFrom(const IColumn & src_, size_t position, size_t length) override;
#else
using IColumn::insertFrom;
using IColumn::insertManyFrom;
using IColumn::insertRangeFrom;
void doInsertFrom(const IColumn & src_, size_t n) override;
void doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) override;
void doInsertManyFrom(const IColumn & src_, size_t position, size_t length) override;
#endif
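/// Without the using-declarations above, the discriminator-mapping overloads
/// declared below would hide the inherited non-virtual insertFrom/insertManyFrom/
/// insertRangeFrom wrappers from IColumn.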
/// Methods for insertion from another Variant but with known mapping between global discriminators.
void insertFrom(const IColumn & src_, size_t n, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping);
@ -213,7 +223,11 @@ public:
ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
ColumnPtr replicate(const Offsets & replicate_offsets) const override;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
#endif
bool hasEqualValues() const override;
void getExtremes(Field & min, Field & max) const override;
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,

View File

@ -503,7 +503,11 @@ bool ColumnVector<T>::tryInsert(const DB::Field & x)
}
template <typename T>
#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
#else
void ColumnVector<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
#endif
{
const ColumnVector & src_vec = assert_cast<const ColumnVector &>(src);

View File

@ -64,12 +64,20 @@ public:
return data.size();
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override
#else
void doInsertFrom(const IColumn & src, size_t n) override
#endif
{
data.push_back(assert_cast<const Self &>(src).getData()[n]);
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override
#else
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override
#endif
{
ValueType v = assert_cast<const Self &>(src).getData()[position];
data.resize_fill(data.size() + length, v);
@ -142,7 +150,11 @@ public:
}
/// This method implemented in header because it could be possibly devirtualized.
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override
#else
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override
#endif
{
return CompareHelper<T>::compare(data[n], assert_cast<const Self &>(rhs_).data[m], nan_direction_hint);
}
@ -228,7 +240,11 @@ public:
bool tryInsert(const DB::Field & x) override;
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#else
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
#endif
ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;

View File

@ -46,7 +46,11 @@ String IColumn::dumpStructure() const
return res.str();
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void IColumn::insertFrom(const IColumn & src, size_t n)
#else
void IColumn::doInsertFrom(const IColumn & src, size_t n)
#endif
{
insert(src[n]);
}

View File

@ -1,15 +1,14 @@
#pragma once
#include <Common/COW.h>
#include <Common/PODArray_fwd.h>
#include <Common/Exception.h>
#include <Common/typeid_cast.h>
#include <base/StringRef.h>
#include <Core/TypeId.h>
#include <base/StringRef.h>
#include <Common/COW.h>
#include <Common/Exception.h>
#include <Common/PODArray_fwd.h>
#include <Common/typeid_cast.h>
#include "config.h"
class SipHash;
class Collator;
@ -180,18 +179,42 @@ public:
/// Appends n-th element from other column with the same type.
/// Is used in merge-sort and merges. It could be implemented in inherited classes more optimally than default implementation.
#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertFrom(const IColumn & src, size_t n);
#else
void insertFrom(const IColumn & src, size_t n)
{
assertTypeEquality(src);
doInsertFrom(src, n);
}
#endif
/// Appends range of elements from other column with the same type.
/// Could be used to concatenate columns.
#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
#else
void insertRangeFrom(const IColumn & src, size_t start, size_t length)
{
assertTypeEquality(src);
doInsertRangeFrom(src, start, length);
}
#endif
/// Appends one element from other column with the same type multiple times.
#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertManyFrom(const IColumn & src, size_t position, size_t length)
{
for (size_t i = 0; i < length; ++i)
insertFrom(src, position);
}
#else
void insertManyFrom(const IColumn & src, size_t position, size_t length)
{
assertTypeEquality(src);
doInsertManyFrom(src, position, length);
}
#endif
/// Appends one field multiple times. Can be optimized in inherited classes.
virtual void insertMany(const Field & field, size_t length)
@ -322,7 +345,15 @@ public:
*
* For non Nullable and non floating point types, nan_direction_hint is ignored.
*/
#if !defined(ABORT_ON_LOGICAL_ERROR)
[[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
#else
[[nodiscard]] int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
{
assertTypeEquality(rhs);
return doCompareAt(n, m, rhs, nan_direction_hint);
}
#endif
#if USE_EMBEDDED_COMPILER
@ -610,6 +641,8 @@ public:
[[nodiscard]] virtual bool isSparse() const { return false; }
[[nodiscard]] virtual bool isConst() const { return false; }
[[nodiscard]] virtual bool isCollationSupported() const { return false; }
virtual ~IColumn() = default;
@ -633,6 +666,29 @@ protected:
Equals equals,
Sort full_sort,
PartialSort partial_sort) const;
#if defined(ABORT_ON_LOGICAL_ERROR)
virtual void doInsertFrom(const IColumn & src, size_t n);
virtual void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
virtual void doInsertManyFrom(const IColumn & src, size_t position, size_t length)
{
for (size_t i = 0; i < length; ++i)
insertFrom(src, position);
}
virtual int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
private:
void assertTypeEquality(const IColumn & rhs) const
{
/// For Sparse and Const columns, we can compare only the internal types. It is considered normal to, e.g., insert from a plain vector column into a sparse vector column.
/// This case is specifically handled in the ColumnSparse implementation. The situation is similar for Const columns.
/// For the remaining column types, we can compare the types directly.
chassert((isConst() || isSparse()) ? getDataType() == rhs.getDataType() : typeid(*this) == typeid(rhs));
}
#endif
};
using ColumnPtr = IColumn::Ptr;
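The net effect of this IColumn change is a non-virtual-interface pattern for debug builds. A self-contained sketch of the mechanism in plain C++, independent of ClickHouse types (Column here is a stand-in for IColumn):

#include <cassert>
#include <cstddef>
#include <typeinfo>

struct Column
{
    /// Debug-build shape: the public entry point is non-virtual, checks type
    /// equality first (as assertTypeEquality does above), then dispatches to
    /// the virtual implementation.
    void insertFrom(const Column & src, size_t n)
    {
        assert(typeid(*this) == typeid(src));
        doInsertFrom(src, n);
    }

    virtual ~Column() = default;

protected:
    virtual void doInsertFrom(const Column & src, size_t n) = 0;
};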

View File

@ -26,7 +26,11 @@ public:
size_t byteSize() const override { return 0; }
size_t byteSizeAt(size_t) const override { return 0; }
size_t allocatedBytes() const override { return 0; }
#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
#else
int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
#endif
void compareColumn(const IColumn &, size_t, PaddedPODArray<UInt64> *, PaddedPODArray<Int8> &, int, int) const override
{
}
@ -67,12 +71,20 @@ public:
{
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn &, size_t) override
#else
void doInsertFrom(const IColumn &, size_t) override
#endif
{
++s;
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
#else
void doInsertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
#endif
{
s += length;
}

View File

@ -85,7 +85,11 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryInsert is not supported for ColumnUnique.");
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn &, size_t, size_t) override
#else
void doInsertRangeFrom(const IColumn &, size_t, size_t) override
#endif
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertRangeFrom is not supported for ColumnUnique.");
}

View File

@ -52,7 +52,11 @@ static ColumnPtr mockColumn(const DataTypePtr & type, size_t rows)
}
#if !defined(ABORT_ON_LOGICAL_ERROR)
static NO_INLINE void insertManyFrom(IColumn & dst, const IColumn & src)
#else
static NO_INLINE void doInsertManyFrom(IColumn & dst, const IColumn & src)
#endif
{
size_t size = src.size();
dst.insertManyFrom(src, size / 2, size);

View File

@ -1,5 +1,7 @@
#pragma once
#include <atomic>
#include <mutex>
#include <memory>
#include <base/types.h>

View File

@ -1,5 +1,6 @@
#pragma once
#include <atomic>
#include <list>
#include <memory>
#include <mutex>

View File

@ -0,0 +1,389 @@
#include <Common/FieldBinaryEncoding.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
namespace DB
{
namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
extern const int INCORRECT_DATA;
}
namespace
{
enum class FieldBinaryTypeIndex: uint8_t
{
Null = 0x00,
UInt64 = 0x01,
Int64 = 0x02,
UInt128 = 0x03,
Int128 = 0x04,
UInt256 = 0x05,
Int256 = 0x06,
Float64 = 0x07,
Decimal32 = 0x08,
Decimal64 = 0x09,
Decimal128 = 0x0A,
Decimal256 = 0x0B,
String = 0x0C,
Array = 0x0D,
Tuple = 0x0E,
Map = 0x0F,
IPv4 = 0x10,
IPv6 = 0x11,
UUID = 0x12,
Bool = 0x13,
Object = 0x14,
AggregateFunctionState = 0x15,
NegativeInfinity = 0xFE,
PositiveInfinity = 0xFF,
};
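/// Wire format implied by the visitors below: every encoded Field starts with
/// one FieldBinaryTypeIndex byte, followed by a type-specific payload, e.g.
///   UInt64   -> 0x01 + VarUInt value
///   String   -> 0x0C + size-prefixed bytes
///   DecimalN -> tag + VarUInt scale + little-endian value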
class FieldVisitorEncodeBinary
{
public:
void operator() (const Null & x, WriteBuffer & buf) const;
void operator() (const UInt64 & x, WriteBuffer & buf) const;
void operator() (const UInt128 & x, WriteBuffer & buf) const;
void operator() (const UInt256 & x, WriteBuffer & buf) const;
void operator() (const Int64 & x, WriteBuffer & buf) const;
void operator() (const Int128 & x, WriteBuffer & buf) const;
void operator() (const Int256 & x, WriteBuffer & buf) const;
void operator() (const UUID & x, WriteBuffer & buf) const;
void operator() (const IPv4 & x, WriteBuffer & buf) const;
void operator() (const IPv6 & x, WriteBuffer & buf) const;
void operator() (const Float64 & x, WriteBuffer & buf) const;
void operator() (const String & x, WriteBuffer & buf) const;
void operator() (const Array & x, WriteBuffer & buf) const;
void operator() (const Tuple & x, WriteBuffer & buf) const;
void operator() (const Map & x, WriteBuffer & buf) const;
void operator() (const Object & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const;
void operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const;
[[noreturn]] void operator() (const CustomType & x, WriteBuffer & buf) const;
void operator() (const bool & x, WriteBuffer & buf) const;
};
void FieldVisitorEncodeBinary::operator() (const Null & x, WriteBuffer & buf) const
{
if (x.isNull())
writeBinary(UInt8(FieldBinaryTypeIndex::Null), buf);
else if (x.isPositiveInfinity())
writeBinary(UInt8(FieldBinaryTypeIndex::PositiveInfinity), buf);
else if (x.isNegativeInfinity())
writeBinary(UInt8(FieldBinaryTypeIndex::NegativeInfinity), buf);
}
void FieldVisitorEncodeBinary::operator() (const UInt64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt64), buf);
writeVarUInt(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const Int64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int64), buf);
writeVarInt(x, buf);
}
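
For illustration (editorial note, not part of the commit): assuming writeVarInt applies ClickHouse's usual ZigZag var-int scheme, small negative values stay compact. A minimal sketch:

/// Hypothetical example: Int64(-42) encodes as the type byte 0x02
/// followed by a single var-int byte, since ZigZag maps -42 to 83 (0x53).
void int64EncodingExample(WriteBuffer & buf)
{
    FieldVisitorEncodeBinary visitor;
    visitor(Int64(-42), buf); /// writes 0x02 0x53
}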
void FieldVisitorEncodeBinary::operator() (const Float64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Float64), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const String & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::String), buf);
writeStringBinary(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const UInt128 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt128), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const Int128 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int128), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const UInt256 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt256), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const Int256 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int256), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const UUID & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UUID), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const IPv4 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::IPv4), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const IPv6 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::IPv6), buf);
writeBinaryLittleEndian(x, buf);
}
void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal32), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}
void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal64), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}
void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal128), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}
void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal256), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}
void FieldVisitorEncodeBinary::operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::AggregateFunctionState), buf);
writeStringBinary(x.name, buf);
writeStringBinary(x.data, buf);
}
void FieldVisitorEncodeBinary::operator() (const Array & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Array), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
}
void FieldVisitorEncodeBinary::operator() (const Tuple & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Tuple), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
}
void FieldVisitorEncodeBinary::operator() (const Map & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Map), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
{
const Tuple & key_and_value = x[i].get<Tuple>();
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[0]);
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[1]);
}
}
void FieldVisitorEncodeBinary::operator() (const Object & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Object), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (const auto & [key, value] : x)
{
writeStringBinary(key, buf);
Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, value);
}
}
void FieldVisitorEncodeBinary::operator()(const bool & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Bool), buf);
writeBinary(static_cast<UInt8>(x), buf);
}
[[noreturn]] void FieldVisitorEncodeBinary::operator()(const CustomType &, WriteBuffer &) const
{
/// TODO: Support binary encoding/decoding for custom types somehow.
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of Field with custom type is not supported");
}
template <typename T>
Field decodeBigInteger(ReadBuffer & buf)
{
T value;
readBinaryLittleEndian(value, buf);
return value;
}
template <typename T>
DecimalField<T> decodeDecimal(ReadBuffer & buf)
{
UInt32 scale;
readVarUInt(scale, buf);
T value;
readBinaryLittleEndian(value, buf);
return DecimalField<T>(value, scale);
}
template <typename T>
T decodeValueLittleEndian(ReadBuffer & buf)
{
T value;
readBinaryLittleEndian(value, buf);
return value;
}
template <typename T>
T decodeArrayLikeField(ReadBuffer & buf)
{
size_t size;
readVarUInt(size, buf);
T value;
for (size_t i = 0; i != size; ++i)
value.push_back(decodeField(buf));
return value;
}
}
void encodeField(const Field & x, WriteBuffer & buf)
{
Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, x);
}
Field decodeField(ReadBuffer & buf)
{
UInt8 type;
readBinary(type, buf);
switch (FieldBinaryTypeIndex(type))
{
case FieldBinaryTypeIndex::Null:
return Null();
case FieldBinaryTypeIndex::PositiveInfinity:
return POSITIVE_INFINITY;
case FieldBinaryTypeIndex::NegativeInfinity:
return NEGATIVE_INFINITY;
case FieldBinaryTypeIndex::Int64:
{
Int64 value;
readVarInt(value, buf);
return value;
}
case FieldBinaryTypeIndex::UInt64:
{
UInt64 value;
readVarUInt(value, buf);
return value;
}
case FieldBinaryTypeIndex::Int128:
return decodeBigInteger<Int128>(buf);
case FieldBinaryTypeIndex::UInt128:
return decodeBigInteger<UInt128>(buf);
case FieldBinaryTypeIndex::Int256:
return decodeBigInteger<Int256>(buf);
case FieldBinaryTypeIndex::UInt256:
return decodeBigInteger<UInt256>(buf);
case FieldBinaryTypeIndex::Float64:
return decodeValueLittleEndian<Float64>(buf);
case FieldBinaryTypeIndex::Decimal32:
return decodeDecimal<Decimal32>(buf);
case FieldBinaryTypeIndex::Decimal64:
return decodeDecimal<Decimal64>(buf);
case FieldBinaryTypeIndex::Decimal128:
return decodeDecimal<Decimal128>(buf);
case FieldBinaryTypeIndex::Decimal256:
return decodeDecimal<Decimal256>(buf);
case FieldBinaryTypeIndex::String:
{
String value;
readStringBinary(value, buf);
return value;
}
case FieldBinaryTypeIndex::UUID:
return decodeValueLittleEndian<UUID>(buf);
case FieldBinaryTypeIndex::IPv4:
return decodeValueLittleEndian<IPv4>(buf);
case FieldBinaryTypeIndex::IPv6:
return decodeValueLittleEndian<IPv6>(buf);
case FieldBinaryTypeIndex::Bool:
{
bool value;
readBinary(value, buf);
return value;
}
case FieldBinaryTypeIndex::Array:
return decodeArrayLikeField<Array>(buf);
case FieldBinaryTypeIndex::Tuple:
return decodeArrayLikeField<Tuple>(buf);
case FieldBinaryTypeIndex::Map:
{
size_t size;
readVarUInt(size, buf);
Map map;
for (size_t i = 0; i != size; ++i)
{
Tuple key_and_value;
key_and_value.push_back(decodeField(buf));
key_and_value.push_back(decodeField(buf));
map.push_back(key_and_value);
}
return map;
}
case FieldBinaryTypeIndex::Object:
{
size_t size;
readVarUInt(size, buf);
Object value;
for (size_t i = 0; i != size; ++i)
{
String name;
readStringBinary(name, buf);
value[name] = decodeField(buf);
}
return value;
}
case FieldBinaryTypeIndex::AggregateFunctionState:
{
String name;
readStringBinary(name, buf);
String data;
readStringBinary(data, buf);
return AggregateFunctionStateData{.name = name, .data = data};
}
}
throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown Field type: {0:#04x}", UInt64(type));
}
}
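
A minimal usage sketch (editorial, not part of the commit), relying only on the encodeField/decodeField entry points defined above:

#include <Common/FieldBinaryEncoding.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>

/// Round-trip a Field through the binary encoding: UInt64(42) is written as
/// the type byte 0x01 followed by var_uint 42 (0x2A), then decoded back.
void fieldRoundTripExample()
{
    DB::WriteBufferFromOwnString out;
    DB::encodeField(DB::Field(UInt64(42)), out);
    DB::ReadBufferFromString in(out.str());
    DB::Field decoded = DB::decodeField(in); /// decoded == UInt64(42)
}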

View File

@ -0,0 +1,43 @@
#pragma once
#include <Core/Field.h>
namespace DB
{
/**
Binary encoding for Fields:
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| Field type | Binary encoding |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `Null` | `0x00` |
| `UInt64` | `0x01<var_uint_value>` |
| `Int64` | `0x02<var_int_value>` |
| `UInt128` | `0x03<uint128_little_endian_value>` |
| `Int128` | `0x04<int128_little_endian_value>` |
| `UInt256`                | `0x05<uint256_little_endian_value>`                                                                                              |
| `Int256`                 | `0x06<int256_little_endian_value>`                                                                                               |
| `Float64` | `0x07<float64_little_endian_value>` |
| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map`                    | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>`                                     |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
| `Bool` | `0x13<bool_value>` |
| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
| `Negative infinity` | `0xFE` |
| `Positive infinity` | `0xFF` |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
*/
void encodeField(const Field &, WriteBuffer & buf);
Field decodeField(ReadBuffer & buf);
}
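
Worked example (editorial): per the table above, a Map with the single entry UInt64(1) -> String("a") is encoded as

0x0F            Map
0x01            var_uint size = 1
0x01 0x01       key: UInt64(1)
0x0C 0x01 0x61  value: String of length 1, byte 'a'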

View File

@ -7,19 +7,20 @@ namespace DB
/// Kind of a temporal interval.
struct IntervalKind
{
/// Note: the order and numbers are important and are used in binary encoding; append new interval kinds to the end of the list.
enum class Kind : uint8_t
{
Nanosecond,
Microsecond,
Millisecond,
Second,
Minute,
Hour,
Day,
Week,
Month,
Quarter,
Year,
Nanosecond = 0x00,
Microsecond = 0x01,
Millisecond = 0x02,
Second = 0x03,
Minute = 0x04,
Hour = 0x05,
Day = 0x06,
Week = 0x07,
Month = 0x08,
Quarter = 0x09,
Year = 0x0A,
};
Kind kind = Kind::Second;
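
Editorial sketch: the encoder writes the kind as a single byte (see the Interval case of encodeDataType later in this commit), so renumbering would silently change the wire format. A hypothetical compile-time guard, not present in the commit, could catch that:

static_assert(static_cast<uint8_t>(IntervalKind::Kind::Nanosecond) == 0x00
           && static_cast<uint8_t>(IntervalKind::Kind::Year) == 0x0A,
              "IntervalKind values are part of the binary encoding and must stay stable");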

View File

@ -28,7 +28,6 @@ namespace ErrorCodes
namespace ProfileEvents
{
extern const Event DistributedConnectionFailTry;
extern const Event DistributedConnectionFailAtAll;
extern const Event DistributedConnectionSkipReadOnlyReplica;
}
@ -285,7 +284,6 @@ PoolWithFailoverBase<TNestedPool>::getMany(
else
{
LOG_WARNING(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
shuffled_pool.error_count = std::min(max_error_cap, shuffled_pool.error_count + 1);

View File

@ -568,6 +568,7 @@ The server successfully detected this situation and will download merged part fr
M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \
M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \
M(AggregationOptimizedEqualRangesOfKeys, "For how many blocks optimization of equal ranges of keys was applied") \
M(HashJoinPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for hash join.") \
\
M(MetadataFromKeeperCacheHit, "Number of times an object storage metadata request was answered from cache without making request to Keeper") \
M(MetadataFromKeeperCacheMiss, "Number of times an object storage metadata request had to be answered from Keeper") \

View File

@ -1,5 +1,6 @@
#pragma once
#include <atomic>
#include <string>
#include <vector>
#include <mutex>

View File

@ -5,6 +5,7 @@
#include <Common/ConcurrentBoundedQueue.h>
#include <map>
#include <variant>
#include <unordered_map>
#include <unordered_set>
#include <future>

View File

@ -2,9 +2,11 @@
#include "config.h"
#include <atomic>
#include <memory>
#include <unordered_map>
#include <string>
#include <vector>
#include <boost/noncopyable.hpp>
namespace DB

View File

@ -6,6 +6,7 @@
#include <base/hex.h>
#include "Common/ZooKeeper/IKeeper.h"
#include "Common/ZooKeeper/ZooKeeperCommon.h"
#include <Common/setThreadName.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/checkStackSize.h>
@ -320,7 +321,7 @@ void KeeperDispatcher::responseThread()
try
{
setResponse(response_for_session.session_id, response_for_session.response);
setResponse(response_for_session.session_id, response_for_session.response, response_for_session.request);
}
catch (...)
{
@ -355,7 +356,7 @@ void KeeperDispatcher::snapshotThread()
}
}
void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)
{
std::lock_guard lock(session_to_response_callback_mutex);
@ -369,7 +370,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
return;
auto callback = new_session_id_response_callback[session_id_resp.internal_id];
callback(response);
callback(response, request);
new_session_id_response_callback.erase(session_id_resp.internal_id);
}
else /// Normal response, just write to client
@ -380,7 +381,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
if (session_response_callback == session_to_response_callback.end())
return;
session_response_callback->second(response);
session_response_callback->second(response, request);
/// Session closed, no more writes
if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
@ -771,21 +772,27 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
{
std::lock_guard lock(session_to_response_callback_mutex);
new_session_id_response_callback[request->internal_id] = [promise, internal_id = request->internal_id] (const Coordination::ZooKeeperResponsePtr & response)
new_session_id_response_callback[request->internal_id]
= [promise, internal_id = request->internal_id](
const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr /*request*/)
{
if (response->getOpNum() != Coordination::OpNum::SessionID)
promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
"Incorrect response of type {} instead of SessionID response", response->getOpNum())));
promise->set_exception(std::make_exception_ptr(Exception(
ErrorCodes::LOGICAL_ERROR, "Incorrect response of type {} instead of SessionID response", response->getOpNum())));
auto session_id_response = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
if (session_id_response.internal_id != internal_id)
{
promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
"Incorrect response with internal id {} instead of {}", session_id_response.internal_id, internal_id)));
promise->set_exception(std::make_exception_ptr(Exception(
ErrorCodes::LOGICAL_ERROR,
"Incorrect response with internal id {} instead of {}",
session_id_response.internal_id,
internal_id)));
}
if (response->error != Coordination::Error::ZOK)
promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
promise->set_exception(
std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
promise->set_value(session_id_response.session_id);
};

View File

@ -20,7 +20,7 @@
namespace DB
{
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response)>;
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)>;
/// High-level wrapper for ClickHouse Keeper.
/// Process user requests via consensus and return responses.
@ -92,7 +92,7 @@ private:
void clusterUpdateWithReconfigDisabledThread();
void clusterUpdateThread();
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request = nullptr);
/// Add error responses for requests to responses queue.
/// Clears requests.
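
A minimal sketch (hypothetical, not in the commit) of a callback matching the extended signature; request may be nullptr for responses with no originating request, e.g. watch notifications:

ZooKeeperResponseCallback callback
    = [](const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)
{
    if (request)
        handleMatchedPair(*request, *response); /// hypothetical helper
    else
        handleBareResponse(*response);          /// hypothetical helper, e.g. for watch notifications
};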

View File

@ -407,7 +407,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
if (!keeper_context->localLogsPreprocessed() && !preprocess(*request_for_session))
return nullptr;
auto try_push = [&](const KeeperStorage::ResponseForSession& response)
auto try_push = [&](const KeeperStorage::ResponseForSession & response)
{
if (!responses_queue.push(response))
{
@ -416,17 +416,6 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
"Failed to push response with session id {} to the queue, probably because of shutdown",
response.session_id);
}
using namespace std::chrono;
uint64_t elapsed = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count() - request_for_session->time;
if (elapsed > keeper_context->getCoordinationSettings()->log_slow_total_threshold_ms)
{
LOG_INFO(
log,
"Total time to process a request took too long ({}ms).\nRequest info: {}",
elapsed,
request_for_session->request->toString(/*short_format=*/true));
}
};
try
@ -443,6 +432,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
KeeperStorage::ResponseForSession response_for_session;
response_for_session.session_id = -1;
response_for_session.response = response;
response_for_session.request = request_for_session->request;
LockGuardWithStats lock(storage_and_responses_lock);
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
@ -462,8 +452,14 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
LockGuardWithStats lock(storage_and_responses_lock);
KeeperStorage::ResponsesForSessions responses_for_sessions
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
for (auto & response_for_session : responses_for_sessions)
{
if (response_for_session.response->xid != Coordination::WATCH_XID)
response_for_session.request = request_for_session->request;
try_push(response_for_session);
}
if (keeper_context->digestEnabled() && request_for_session->digest)
assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, request_for_session->log_idx, true);
@ -797,9 +793,14 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
LockGuardWithStats lock(storage_and_responses_lock);
auto responses = storage->processRequest(
request_for_session.request, request_for_session.session_id, std::nullopt, true /*check_acl*/, true /*is_local*/);
for (const auto & response : responses)
if (!responses_queue.push(response))
LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response.session_id);
for (auto & response_for_session : responses)
{
if (response_for_session.response->xid != Coordination::WATCH_XID)
response_for_session.request = request_for_session.request;
if (!responses_queue.push(response_for_session))
LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response_for_session.session_id);
}
}
void KeeperStateMachine::shutdownStorage()

View File

@ -206,6 +206,7 @@ public:
{
int64_t session_id;
Coordination::ZooKeeperResponsePtr response;
Coordination::ZooKeeperRequestPtr request = nullptr;
};
using ResponsesForSessions = std::vector<ResponseForSession>;

View File

@ -27,7 +27,8 @@ PoolWithFailover::PoolWithFailover(
size_t pool_size,
size_t pool_wait_timeout_,
size_t max_tries_,
bool auto_close_connection_)
bool auto_close_connection_,
size_t connection_attempt_timeout_)
: pool_wait_timeout(pool_wait_timeout_)
, max_tries(max_tries_)
, auto_close_connection(auto_close_connection_)
@ -39,8 +40,13 @@ PoolWithFailover::PoolWithFailover(
{
for (const auto & replica_configuration : configurations)
{
auto connection_info = formatConnectionString(replica_configuration.database,
replica_configuration.host, replica_configuration.port, replica_configuration.username, replica_configuration.password);
auto connection_info = formatConnectionString(
replica_configuration.database,
replica_configuration.host,
replica_configuration.port,
replica_configuration.username,
replica_configuration.password,
connection_attempt_timeout_);
replicas_with_priority[priority].emplace_back(connection_info, pool_size);
}
}
@ -51,7 +57,8 @@ PoolWithFailover::PoolWithFailover(
size_t pool_size,
size_t pool_wait_timeout_,
size_t max_tries_,
bool auto_close_connection_)
bool auto_close_connection_,
size_t connection_attempt_timeout_)
: pool_wait_timeout(pool_wait_timeout_)
, max_tries(max_tries_)
, auto_close_connection(auto_close_connection_)
@ -63,7 +70,13 @@ PoolWithFailover::PoolWithFailover(
for (const auto & [host, port] : configuration.addresses)
{
LOG_DEBUG(getLogger("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port);
auto connection_string = formatConnectionString(configuration.database, host, port, configuration.username, configuration.password);
auto connection_string = formatConnectionString(
configuration.database,
host,
port,
configuration.username,
configuration.password,
connection_attempt_timeout_);
replicas_with_priority[0].emplace_back(connection_string, pool_size);
}
}

View File

@ -14,7 +14,6 @@
static constexpr inline auto POSTGRESQL_POOL_DEFAULT_SIZE = 16;
static constexpr inline auto POSTGRESQL_POOL_WAIT_TIMEOUT = 5000;
static constexpr inline auto POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES = 2;
namespace postgres
{
@ -30,14 +29,16 @@ public:
size_t pool_size,
size_t pool_wait_timeout,
size_t max_tries_,
bool auto_close_connection_);
bool auto_close_connection_,
size_t connection_attempt_timeout_);
explicit PoolWithFailover(
const DB::StoragePostgreSQL::Configuration & configuration,
size_t pool_size,
size_t pool_wait_timeout,
size_t max_tries_,
bool auto_close_connection_);
bool auto_close_connection_,
size_t connection_attempt_timeout_);
PoolWithFailover(const PoolWithFailover & other) = delete;

View File

@ -8,7 +8,7 @@
namespace postgres
{
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password)
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout)
{
DB::WriteBufferFromOwnString out;
out << "dbname=" << DB::quote << dbname
@ -16,7 +16,7 @@ ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, S
<< " port=" << port
<< " user=" << DB::quote << user
<< " password=" << DB::quote << password
<< " connect_timeout=2";
<< " connect_timeout=" << timeout;
return {out.str(), host + ':' + DB::toString(port)};
}

View File

@ -18,7 +18,7 @@ namespace pqxx
namespace postgres
{
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password);
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout);
String getConnectionForLog(const String & host, UInt16 port);

View File

@ -63,7 +63,7 @@ const char USER_INTERSERVER_MARKER[] = " INTERSERVER SECRET ";
/// Marker for SSH-keys-based authentication (passed as the user name)
const char SSH_KEY_AUTHENTICAION_MARKER[] = " SSH KEY AUTHENTICATION ";
/// Market for JSON Web Token authentication
/// Marker for JSON Web Token authentication
const char JWT_AUTHENTICAION_MARKER[] = " JWT AUTHENTICATION ";
};

View File

@ -151,6 +151,7 @@ namespace DB
M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
M(String, merge_workload, "default", "Name of workload to be used to access resources for all merges (may be overridden by a merge tree setting)", 0) \
M(String, mutation_workload, "default", "Name of workload to be used to access resources for all mutations (may be overridden by a merge tree setting)", 0) \
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \

View File

@ -530,6 +530,7 @@ class IColumn;
M(Bool, optimize_read_in_order, true, "Enable ORDER BY optimization for reading data in corresponding order in MergeTree tables.", 0) \
M(Bool, optimize_read_in_window_order, true, "Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.", 0) \
M(Bool, optimize_aggregation_in_order, false, "Enable GROUP BY optimization for aggregating data in corresponding order in MergeTree tables.", 0) \
M(Bool, read_in_order_use_buffering, true, "Use buffering before merging while reading in order of primary key. It increases the parallelism of query execution", 0) \
M(UInt64, aggregation_in_order_max_block_bytes, 50000000, "Maximal size of block in bytes accumulated during aggregation in order of primary key. Lower block size allows to parallelize more final merge stage of aggregation.", 0) \
M(UInt64, read_in_order_two_level_merge_threshold, 100, "Minimal number of parts to read to run preliminary merge step during multithread reading in order of primary key.", 0) \
M(Bool, low_cardinality_allow_in_native_format, true, "Use LowCardinality type in Native format. Otherwise, convert LowCardinality columns to ordinary for select query, and convert ordinary columns to required LowCardinality for insert query.", 0) \
@ -560,7 +561,9 @@ class IColumn;
M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \
\
M(UInt64, postgresql_connection_pool_size, 16, "Connection pool size for PostgreSQL table engine and database engine.", 0) \
M(UInt64, postgresql_connection_attempt_timeout, 2, "Connection timeout to PostgreSQL table engine and database engine in seconds.", 0) \
M(UInt64, postgresql_connection_pool_wait_timeout, 5000, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \
M(UInt64, postgresql_connection_pool_retries, 2, "Connection pool push/pop retries number for PostgreSQL table engine and database engine.", 0) \
M(Bool, postgresql_connection_pool_auto_close_connection, false, "Close connection before returning connection to the pool.", 0) \
M(UInt64, glob_expansion_max_elements, 1000, "Maximum number of allowed addresses (For external storages, table functions, etc).", 0) \
M(UInt64, odbc_bridge_connection_pool_size, 16, "Connection pool size for each connection settings string in ODBC bridge.", 0) \
@ -677,9 +680,11 @@ class IColumn;
M(UInt64, insert_shard_id, 0, "If non zero, when insert into a distributed table, the data will be inserted into the shard `insert_shard_id` synchronously. Possible values range from 1 to `shards_number` of corresponding distributed table", 0) \
\
M(Bool, collect_hash_table_stats_during_aggregation, true, "Enable collecting hash table statistics to optimize memory allocation", 0) \
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
M(UInt64, max_size_to_preallocate_for_aggregation, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \
\
M(Bool, collect_hash_table_stats_during_joins, true, "Enable collecting hash table statistics to optimize memory allocation", 0) \
M(UInt64, max_size_to_preallocate_for_joins, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before join", 0) \
\
M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \
M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \
M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \
@ -718,6 +723,7 @@ class IColumn;
M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
M(Bool, optimize_group_by_constant_keys, true, "Optimize GROUP BY when all keys in block are constant", 0) \
M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \
M(Bool, enable_named_columns_in_function_tuple, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers.", 0) \
\
M(Bool, query_plan_enable_optimizations, true, "Globally enable/disable query optimization at the query plan level", 0) \
M(UInt64, query_plan_max_optimizations_to_apply, 10000, "Limit the total number of optimizations applied to query plan. If zero, ignored. If limit reached, throw exception", 0) \
@ -1008,6 +1014,7 @@ class IColumn;
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, async_insert_threads, 16) \
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_replicated_fetches_network_bandwidth_for_server, 0) \
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_replicated_sends_network_bandwidth_for_server, 0) \
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_entries_for_hash_table_stats, 10'000) \
/* ---- */ \
MAKE_OBSOLETE(M, DefaultDatabaseEngine, default_database_engine, DefaultDatabaseEngine::Atomic) \
MAKE_OBSOLETE(M, UInt64, max_pipeline_depth, 0) \
@ -1130,6 +1137,8 @@ class IColumn;
M(Bool, input_format_tsv_crlf_end_of_line, false, "If it is set true, file function will read TSV format with \\r\\n instead of \\n.", 0) \
\
M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \
M(Bool, input_format_native_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in Native input format", 0) \
M(Bool, output_format_native_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in Native output format", 0) \
\
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
@ -1149,6 +1158,8 @@ class IColumn;
M(Bool, input_format_avro_null_as_default, false, "For Avro/AvroConfluent format: insert default in case of null and non Nullable column", 0) \
M(UInt64, format_binary_max_string_size, 1_GiB, "The maximum allowed size for String in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
M(UInt64, format_binary_max_array_size, 1_GiB, "The maximum allowed size for Array in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
M(Bool, input_format_binary_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in RowBinaryWithNamesAndTypes input format", 0) \
M(Bool, output_format_binary_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in RowBinaryWithNamesAndTypes output format", 0) \
M(URI, format_avro_schema_registry_url, "", "For AvroConfluent format: Confluent Schema Registry URL.", 0) \
\
M(Bool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \
@ -1169,9 +1180,9 @@ class IColumn;
M(UInt64, output_format_pretty_max_value_width, 10000, "Maximum width of value to display in Pretty formats. If greater - it will be cut.", 0) \
M(UInt64, output_format_pretty_max_value_width_apply_for_single_value, false, "Only cut values (see the `output_format_pretty_max_value_width` setting) when it is not a single value in a block. Otherwise output it entirely, which is useful for the `SHOW CREATE TABLE` query.", 0) \
M(UInt64Auto, output_format_pretty_color, "auto", "Use ANSI escape sequences in Pretty formats. 0 - disabled, 1 - enabled, 'auto' - enabled if a terminal.", 0) \
M(String, output_format_pretty_grid_charset, "UTF-8", "Charset for printing grid borders. Available charsets: ASCII, UTF-8 (default one).", 0) \
M(UInt64, output_format_pretty_display_footer_column_names, true, "Display column names in the footer if there are 999 or more rows.", 0) \
M(UInt64, output_format_pretty_display_footer_column_names_min_rows, 50, "Sets the minimum threshold value of rows for which to enable displaying column names in the footer. 50 (default)", 0) \
M(UInt64, output_format_parquet_row_group_size, 1000000, "Target row group size in rows.", 0) \
M(UInt64, output_format_parquet_row_group_size_bytes, 512 * 1024 * 1024, "Target row group size in bytes, before compression.", 0) \
M(Bool, output_format_parquet_string_as_string, true, "Use Parquet String type instead of Binary for String columns.", 0) \

View File

@ -58,9 +58,17 @@ String ClickHouseVersion::toString() const
static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory::SettingsChanges>> settings_changes_history_initializer =
{
{"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."},
{"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"},
{"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"},
{"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"},
{"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"},
{"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"},
{"optimize_functions_to_subcolumns", false, true, "Enable optimization by default"},
{"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."},
{"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
{"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
{"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."},
{"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
@ -68,6 +76,8 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."}
}},
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
{"materialize_statistics_on_insert", true, true, "Added new setting to allow to disable materialization of statistics on insert"},

View File

@ -0,0 +1,65 @@
#include <gtest/gtest.h>
#include <Common/FieldBinaryEncoding.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>
using namespace DB;
namespace DB::ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
}
void check(const Field & field)
{
// std::cerr << "Check " << toString(field) << "\n";
WriteBufferFromOwnString ostr;
encodeField(field, ostr);
ReadBufferFromString istr(ostr.str());
Field decoded_field = decodeField(istr);
ASSERT_TRUE(istr.eof());
ASSERT_EQ(field, decoded_field);
}
GTEST_TEST(FieldBinaryEncoding, EncodeAndDecode)
{
check(Null());
check(POSITIVE_INFINITY);
check(NEGATIVE_INFINITY);
check(true);
check(UInt64(42));
check(Int64(-42));
check(UInt128(42));
check(Int128(-42));
check(UInt256(42));
check(Int256(-42));
check(UUID(42));
check(IPv4(42));
check(IPv6(42));
check(Float64(42.42));
check(String("Hello, World!"));
check(Array({Field(UInt64(42)), Field(UInt64(43))}));
check(Tuple({Field(UInt64(42)), Field(Null()), Field(UUID(42)), Field(String("Hello, World!"))}));
check(Map({Tuple{Field(UInt64(42)), Field(String("str_42"))}, Tuple{Field(UInt64(43)), Field(String("str_43"))}}));
check(Object({{String("key_1"), Field(UInt64(42))}, {String("key_2"), Field(UInt64(43))}}));
check(DecimalField<Decimal32>(4242, 3));
check(DecimalField<Decimal64>(4242, 3));
check(DecimalField<Decimal128>(Int128(4242), 3));
check(DecimalField<Decimal256>(Int256(4242), 3));
check(AggregateFunctionStateData{.name="some_name", .data="some_data"});
try
{
check(CustomType());
}
catch (const Exception & e)
{
ASSERT_EQ(e.code(), ErrorCodes::UNSUPPORTED_METHOD);
}
check(Array({
Tuple({Field(UInt64(42)), Map({Tuple{Field(UInt64(42)), Field(String("str_42"))}, Tuple{Field(UInt64(43)), Field(String("str_43"))}}), Field(UUID(42)), Field(String("Hello, World!"))}),
Tuple({Field(UInt64(43)), Map({Tuple{Field(UInt64(43)), Field(String("str_43"))}, Tuple{Field(UInt64(44)), Field(String("str_44"))}}), Field(UUID(43)), Field(String("Hello, World 2!"))})
}));
}

View File

@ -25,7 +25,6 @@ private:
mutable std::optional<size_t> version;
String getNameImpl(bool with_version) const;
size_t getVersion() const;
public:
static constexpr bool is_parametric = true;
@ -39,6 +38,8 @@ public:
{
}
size_t getVersion() const;
String getFunctionName() const;
AggregateFunctionPtr getFunction() const { return function; }

View File

@ -165,6 +165,19 @@ static std::pair<DataTypePtr, DataTypeCustomDescPtr> create(const ASTPtr & argum
return std::make_pair(storage_type, std::make_unique<DataTypeCustomDesc>(std::move(custom_name), nullptr));
}
String DataTypeCustomSimpleAggregateFunction::getFunctionName() const
{
return function->getName();
}
DataTypePtr createSimpleAggregateFunctionType(const AggregateFunctionPtr & function, const DataTypes & argument_types, const Array & parameters)
{
auto custom_desc = std::make_unique<DataTypeCustomDesc>(
std::make_unique<DataTypeCustomSimpleAggregateFunction>(function, argument_types, parameters));
return DataTypeFactory::instance().getCustom(std::move(custom_desc));
}
void registerDataTypeDomainSimpleAggregateFunction(DataTypeFactory & factory)
{
factory.registerDataTypeCustom("SimpleAggregateFunction", create);

View File

@ -40,8 +40,13 @@ public:
: function(function_), argument_types(argument_types_), parameters(parameters_) {}
AggregateFunctionPtr getFunction() const { return function; }
String getFunctionName() const;
const DataTypes & getArgumentsDataTypes() const { return argument_types; }
const Array & getParameters() const { return parameters; }
String getName() const override;
static void checkSupportedFunctions(const AggregateFunctionPtr & function);
};
DataTypePtr createSimpleAggregateFunctionType(const AggregateFunctionPtr & function, const DataTypes & argument_types, const Array & parameters);
}

View File

@ -71,7 +71,7 @@ static DataTypePtr create(const ASTPtr & arguments)
auto * literal = argument->arguments->children[1]->as<ASTLiteral>();
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get<UInt64>() == 0 || literal->value.get<UInt64>() > 255)
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get<UInt64>() == 0 || literal->value.get<UInt64>() > ColumnVariant::MAX_NESTED_COLUMNS)
throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 1 and 255");
return std::make_shared<DataTypeDynamic>(literal->value.get<UInt64>());

View File

@ -19,6 +19,8 @@ public:
}
String getName() const override;
const DataTypes & getElements() const { return elems; }
const Names & getNames() const { return names; }
};
DataTypePtr createNested(const DataTypes & types, const Names & names);

View File

@ -0,0 +1,705 @@
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeFixedString.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeFunction.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeMap.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeSet.h>
#include <DataTypes/DataTypeInterval.h>
#include <DataTypes/DataTypeIPv4andIPv6.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypeCustomSimpleAggregateFunction.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeDynamic.h>
#include <DataTypes/DataTypeNested.h>
#include <DataTypes/DataTypeFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <Parsers/NullsAction.h>
#include <IO/WriteBuffer.h>
#include <IO/ReadBuffer.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Common/FieldBinaryEncoding.h>
#include <Common/assert_cast.h>
namespace DB
{
namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
extern const int INCORRECT_DATA;
}
namespace
{
enum class BinaryTypeIndex : uint8_t
{
Nothing = 0x00,
UInt8 = 0x01,
UInt16 = 0x02,
UInt32 = 0x03,
UInt64 = 0x04,
UInt128 = 0x05,
UInt256 = 0x06,
Int8 = 0x07,
Int16 = 0x08,
Int32 = 0x09,
Int64 = 0x0A,
Int128 = 0x0B,
Int256 = 0x0C,
Float32 = 0x0D,
Float64 = 0x0E,
Date = 0x0F,
Date32 = 0x10,
DateTimeUTC = 0x11,
DateTimeWithTimezone = 0x12,
DateTime64UTC = 0x13,
DateTime64WithTimezone = 0x14,
String = 0x15,
FixedString = 0x16,
Enum8 = 0x17,
Enum16 = 0x18,
Decimal32 = 0x19,
Decimal64 = 0x1A,
Decimal128 = 0x1B,
Decimal256 = 0x1C,
UUID = 0x1D,
Array = 0x1E,
UnnamedTuple = 0x1F,
NamedTuple = 0x20,
Set = 0x21,
Interval = 0x22,
Nullable = 0x23,
Function = 0x24,
AggregateFunction = 0x25,
LowCardinality = 0x26,
Map = 0x27,
IPv4 = 0x28,
IPv6 = 0x29,
Variant = 0x2A,
Dynamic = 0x2B,
Custom = 0x2C,
Bool = 0x2D,
SimpleAggregateFunction = 0x2E,
Nested = 0x2F,
};
BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type)
{
/// By default custom types don't have their own BinaryTypeIndex.
if (type->hasCustomName())
{
/// Some widely used custom types have separate BinaryTypeIndex for better serialization.
/// Right now it's Bool, SimpleAggregateFunction and Nested types.
/// TODO: Consider adding BinaryTypeIndex for more custom types.
if (isBool(type))
return BinaryTypeIndex::Bool;
if (typeid_cast<const DataTypeCustomSimpleAggregateFunction *>(type->getCustomName()))
return BinaryTypeIndex::SimpleAggregateFunction;
if (isNested(type))
return BinaryTypeIndex::Nested;
return BinaryTypeIndex::Custom;
}
switch (type->getTypeId())
{
case TypeIndex::Nothing:
return BinaryTypeIndex::Nothing;
case TypeIndex::UInt8:
return BinaryTypeIndex::UInt8;
case TypeIndex::UInt16:
return BinaryTypeIndex::UInt16;
case TypeIndex::UInt32:
return BinaryTypeIndex::UInt32;
case TypeIndex::UInt64:
return BinaryTypeIndex::UInt64;
case TypeIndex::UInt128:
return BinaryTypeIndex::UInt128;
case TypeIndex::UInt256:
return BinaryTypeIndex::UInt256;
case TypeIndex::Int8:
return BinaryTypeIndex::Int8;
case TypeIndex::Int16:
return BinaryTypeIndex::Int16;
case TypeIndex::Int32:
return BinaryTypeIndex::Int32;
case TypeIndex::Int64:
return BinaryTypeIndex::Int64;
case TypeIndex::Int128:
return BinaryTypeIndex::Int128;
case TypeIndex::Int256:
return BinaryTypeIndex::Int256;
case TypeIndex::Float32:
return BinaryTypeIndex::Float32;
case TypeIndex::Float64:
return BinaryTypeIndex::Float64;
case TypeIndex::Date:
return BinaryTypeIndex::Date;
case TypeIndex::Date32:
return BinaryTypeIndex::Date32;
case TypeIndex::DateTime:
if (assert_cast<const DataTypeDateTime &>(*type).hasExplicitTimeZone())
return BinaryTypeIndex::DateTimeWithTimezone;
return BinaryTypeIndex::DateTimeUTC;
case TypeIndex::DateTime64:
if (assert_cast<const DataTypeDateTime64 &>(*type).hasExplicitTimeZone())
return BinaryTypeIndex::DateTime64WithTimezone;
return BinaryTypeIndex::DateTime64UTC;
case TypeIndex::String:
return BinaryTypeIndex::String;
case TypeIndex::FixedString:
return BinaryTypeIndex::FixedString;
case TypeIndex::Enum8:
return BinaryTypeIndex::Enum8;
case TypeIndex::Enum16:
return BinaryTypeIndex::Enum16;
case TypeIndex::Decimal32:
return BinaryTypeIndex::Decimal32;
case TypeIndex::Decimal64:
return BinaryTypeIndex::Decimal64;
case TypeIndex::Decimal128:
return BinaryTypeIndex::Decimal128;
case TypeIndex::Decimal256:
return BinaryTypeIndex::Decimal256;
case TypeIndex::UUID:
return BinaryTypeIndex::UUID;
case TypeIndex::Array:
return BinaryTypeIndex::Array;
case TypeIndex::Tuple:
{
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
if (tuple_type.haveExplicitNames())
return BinaryTypeIndex::NamedTuple;
return BinaryTypeIndex::UnnamedTuple;
}
case TypeIndex::Set:
return BinaryTypeIndex::Set;
case TypeIndex::Interval:
return BinaryTypeIndex::Interval;
case TypeIndex::Nullable:
return BinaryTypeIndex::Nullable;
case TypeIndex::Function:
return BinaryTypeIndex::Function;
case TypeIndex::AggregateFunction:
return BinaryTypeIndex::AggregateFunction;
case TypeIndex::LowCardinality:
return BinaryTypeIndex::LowCardinality;
case TypeIndex::Map:
return BinaryTypeIndex::Map;
case TypeIndex::Object:
/// Object type will be deprecated and replaced by new implementation. No need to support it here.
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type Object is not supported");
case TypeIndex::IPv4:
return BinaryTypeIndex::IPv4;
case TypeIndex::IPv6:
return BinaryTypeIndex::IPv6;
case TypeIndex::Variant:
return BinaryTypeIndex::Variant;
case TypeIndex::Dynamic:
return BinaryTypeIndex::Dynamic;
/// JSONPaths is used only during schema inference and cannot be used anywhere else.
case TypeIndex::JSONPaths:
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type JSONPaths is not supported");
}
}
template <typename T>
void encodeEnumValues(const DataTypePtr & type, WriteBuffer & buf)
{
const auto & enum_type = assert_cast<const DataTypeEnum<T> &>(*type);
const auto & values = enum_type.getValues();
writeVarUInt(values.size(), buf);
for (const auto & [name, value] : values)
{
writeStringBinary(name, buf);
writeBinaryLittleEndian(value, buf);
}
}
template <typename T>
DataTypePtr decodeEnum(ReadBuffer & buf)
{
typename DataTypeEnum<T>::Values values;
size_t size;
readVarUInt(size, buf);
for (size_t i = 0; i != size; ++i)
{
String name;
readStringBinary(name, buf);
T value;
readBinaryLittleEndian(value, buf);
values.emplace_back(name, value);
}
return std::make_shared<DataTypeEnum<T>>(values);
}
template <typename T>
void encodeDecimal(const DataTypePtr & type, WriteBuffer & buf)
{
const auto & decimal_type = assert_cast<const DataTypeDecimal<T> &>(*type);
/// Both precision and scale are at most 76, so each fits in 1 byte.
writeBinary(UInt8(decimal_type.getPrecision()), buf);
writeBinary(UInt8(decimal_type.getScale()), buf);
}
template <typename T>
DataTypePtr decodeDecimal(ReadBuffer & buf)
{
UInt8 precision;
readBinary(precision, buf);
UInt8 scale;
readBinary(scale, buf);
return std::make_shared<DataTypeDecimal<T>>(precision, scale);
}
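
Illustrative byte layout (editorial): a Decimal64 with precision 18 and scale 3 encodes as

0x1A  Decimal64
0x12  precision = 18
0x03  scale = 3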
void encodeAggregateFunction(const String & function_name, const Array & parameters, const DataTypes & arguments_types, WriteBuffer & buf)
{
writeStringBinary(function_name, buf);
writeVarUInt(parameters.size(), buf);
for (const auto & param : parameters)
encodeField(param, buf);
writeVarUInt(arguments_types.size(), buf);
for (const auto & argument_type : arguments_types)
encodeDataType(argument_type, buf);
}
std::tuple<AggregateFunctionPtr, Array, DataTypes> decodeAggregateFunction(ReadBuffer & buf)
{
String function_name;
readStringBinary(function_name, buf);
size_t num_parameters;
readVarUInt(num_parameters, buf);
Array parameters;
parameters.reserve(num_parameters);
for (size_t i = 0; i != num_parameters; ++i)
parameters.push_back(decodeField(buf));
size_t num_arguments;
readVarUInt(num_arguments, buf);
DataTypes arguments_types;
arguments_types.reserve(num_arguments);
for (size_t i = 0; i != num_arguments; ++i)
arguments_types.push_back(decodeDataType(buf));
AggregateFunctionProperties properties;
auto action = NullsAction::EMPTY;
auto function = AggregateFunctionFactory::instance().get(function_name, action, arguments_types, parameters, properties);
return {function, parameters, arguments_types};
}
}
void encodeDataType(const DataTypePtr & type, WriteBuffer & buf)
{
/// First, write the BinaryTypeIndex byte.
auto binary_type_index = getBinaryTypeIndex(type);
buf.write(UInt8(binary_type_index));
/// Then, write additional information depending on the data type.
switch (binary_type_index)
{
case BinaryTypeIndex::DateTimeWithTimezone:
{
const auto & datetime_type = assert_cast<const DataTypeDateTime &>(*type);
writeStringBinary(datetime_type.getTimeZone().getTimeZone(), buf);
break;
}
case BinaryTypeIndex::DateTime64UTC:
{
const auto & datetime64_type = assert_cast<const DataTypeDateTime64 &>(*type);
/// Maximum scale for DateTime64 is 9, so we can write it as 1 byte.
buf.write(UInt8(datetime64_type.getScale()));
break;
}
case BinaryTypeIndex::DateTime64WithTimezone:
{
const auto & datetime64_type = assert_cast<const DataTypeDateTime64 &>(*type);
buf.write(UInt8(datetime64_type.getScale()));
writeStringBinary(datetime64_type.getTimeZone().getTimeZone(), buf);
break;
}
case BinaryTypeIndex::FixedString:
{
const auto & fixed_string_type = assert_cast<const DataTypeFixedString &>(*type);
writeVarUInt(fixed_string_type.getN(), buf);
break;
}
case BinaryTypeIndex::Enum8:
{
encodeEnumValues<Int8>(type, buf);
break;
}
case BinaryTypeIndex::Enum16:
{
encodeEnumValues<Int16>(type, buf);
break;
}
case BinaryTypeIndex::Decimal32:
{
encodeDecimal<Decimal32>(type, buf);
break;
}
case BinaryTypeIndex::Decimal64:
{
encodeDecimal<Decimal64>(type, buf);
break;
}
case BinaryTypeIndex::Decimal128:
{
encodeDecimal<Decimal128>(type, buf);
break;
}
case BinaryTypeIndex::Decimal256:
{
encodeDecimal<Decimal256>(type, buf);
break;
}
case BinaryTypeIndex::Array:
{
const auto & array_type = assert_cast<const DataTypeArray &>(*type);
encodeDataType(array_type.getNestedType(), buf);
break;
}
case BinaryTypeIndex::NamedTuple:
{
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
const auto & types = tuple_type.getElements();
const auto & names = tuple_type.getElementNames();
writeVarUInt(types.size(), buf);
for (size_t i = 0; i != types.size(); ++i)
{
writeStringBinary(names[i], buf);
encodeDataType(types[i], buf);
}
break;
}
case BinaryTypeIndex::UnnamedTuple:
{
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
const auto & element_types = tuple_type.getElements();
writeVarUInt(element_types.size(), buf);
for (const auto & element_type : element_types)
encodeDataType(element_type, buf);
break;
}
case BinaryTypeIndex::Interval:
{
const auto & interval_type = assert_cast<const DataTypeInterval &>(*type);
writeBinary(UInt8(interval_type.getKind().kind), buf);
break;
}
case BinaryTypeIndex::Nullable:
{
const auto & nullable_type = assert_cast<const DataTypeNullable &>(*type);
encodeDataType(nullable_type.getNestedType(), buf);
break;
}
case BinaryTypeIndex::Function:
{
const auto & function_type = assert_cast<const DataTypeFunction &>(*type);
const auto & arguments_types = function_type.getArgumentTypes();
const auto & return_type = function_type.getReturnType();
writeVarUInt(arguments_types.size(), buf);
for (const auto & argument_type : arguments_types)
encodeDataType(argument_type, buf);
encodeDataType(return_type, buf);
break;
}
case BinaryTypeIndex::LowCardinality:
{
const auto & low_cardinality_type = assert_cast<const DataTypeLowCardinality &>(*type);
encodeDataType(low_cardinality_type.getDictionaryType(), buf);
break;
}
case BinaryTypeIndex::Map:
{
const auto & map_type = assert_cast<const DataTypeMap &>(*type);
encodeDataType(map_type.getKeyType(), buf);
encodeDataType(map_type.getValueType(), buf);
break;
}
case BinaryTypeIndex::Variant:
{
const auto & variant_type = assert_cast<const DataTypeVariant &>(*type);
const auto & variants = variant_type.getVariants();
writeVarUInt(variants.size(), buf);
for (const auto & variant : variants)
encodeDataType(variant, buf);
break;
}
case BinaryTypeIndex::Dynamic:
{
const auto & dynamic_type = assert_cast<const DataTypeDynamic &>(*type);
/// Maximum number of dynamic types is 255, so we can write it as 1 byte.
writeBinary(UInt8(dynamic_type.getMaxDynamicTypes()), buf);
break;
}
case BinaryTypeIndex::AggregateFunction:
{
const auto & aggregate_function_type = assert_cast<const DataTypeAggregateFunction &>(*type);
writeVarUInt(aggregate_function_type.getVersion(), buf);
encodeAggregateFunction(aggregate_function_type.getFunctionName(), aggregate_function_type.getParameters(), aggregate_function_type.getArgumentsDataTypes(), buf);
break;
}
case BinaryTypeIndex::SimpleAggregateFunction:
{
const auto & simple_aggregate_function_type = assert_cast<const DataTypeCustomSimpleAggregateFunction &>(*type->getCustomName());
encodeAggregateFunction(simple_aggregate_function_type.getFunctionName(), simple_aggregate_function_type.getParameters(), simple_aggregate_function_type.getArgumentsDataTypes(), buf);
break;
}
case BinaryTypeIndex::Nested:
{
const auto & nested_type = assert_cast<const DataTypeNestedCustomName &>(*type->getCustomName());
const auto & elements = nested_type.getElements();
const auto & names = nested_type.getNames();
writeVarUInt(elements.size(), buf);
for (size_t i = 0; i != elements.size(); ++i)
{
writeStringBinary(names[i], buf);
encodeDataType(elements[i], buf);
}
break;
}
case BinaryTypeIndex::Custom:
{
const auto & type_name = type->getName();
writeStringBinary(type_name, buf);
break;
}
default:
break;
}
}
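/// A minimal usage sketch (illustrative; WriteBufferFromOwnString mirrors the String
/// overload below, and the byte values follow the table in the accompanying header):
///     WriteBufferFromOwnString out;
///     encodeDataType(std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt32>()), out);
///     /// out.str() now holds the two bytes 0x1E 0x03: the Array code, then the UInt32 code.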
String encodeDataType(const DataTypePtr & type)
{
WriteBufferFromOwnString buf;
encodeDataType(type, buf);
return buf.str();
}
DataTypePtr decodeDataType(ReadBuffer & buf)
{
UInt8 type;
readBinary(type, buf);
switch (BinaryTypeIndex(type))
{
case BinaryTypeIndex::Nothing:
return std::make_shared<DataTypeNothing>();
case BinaryTypeIndex::UInt8:
return std::make_shared<DataTypeUInt8>();
case BinaryTypeIndex::Bool:
return DataTypeFactory::instance().get("Bool");
case BinaryTypeIndex::UInt16:
return std::make_shared<DataTypeUInt16>();
case BinaryTypeIndex::UInt32:
return std::make_shared<DataTypeUInt32>();
case BinaryTypeIndex::UInt64:
return std::make_shared<DataTypeUInt64>();
case BinaryTypeIndex::UInt128:
return std::make_shared<DataTypeUInt128>();
case BinaryTypeIndex::UInt256:
return std::make_shared<DataTypeUInt256>();
case BinaryTypeIndex::Int8:
return std::make_shared<DataTypeInt8>();
case BinaryTypeIndex::Int16:
return std::make_shared<DataTypeInt16>();
case BinaryTypeIndex::Int32:
return std::make_shared<DataTypeInt32>();
case BinaryTypeIndex::Int64:
return std::make_shared<DataTypeInt64>();
case BinaryTypeIndex::Int128:
return std::make_shared<DataTypeInt128>();
case BinaryTypeIndex::Int256:
return std::make_shared<DataTypeInt256>();
case BinaryTypeIndex::Float32:
return std::make_shared<DataTypeFloat32>();
case BinaryTypeIndex::Float64:
return std::make_shared<DataTypeFloat64>();
case BinaryTypeIndex::Date:
return std::make_shared<DataTypeDate>();
case BinaryTypeIndex::Date32:
return std::make_shared<DataTypeDate32>();
case BinaryTypeIndex::DateTimeUTC:
return std::make_shared<DataTypeDateTime>();
case BinaryTypeIndex::DateTimeWithTimezone:
{
String time_zone;
readStringBinary(time_zone, buf);
return std::make_shared<DataTypeDateTime>(time_zone);
}
case BinaryTypeIndex::DateTime64UTC:
{
UInt8 scale;
readBinary(scale, buf);
return std::make_shared<DataTypeDateTime64>(scale);
}
case BinaryTypeIndex::DateTime64WithTimezone:
{
UInt8 scale;
readBinary(scale, buf);
String time_zone;
readStringBinary(time_zone, buf);
return std::make_shared<DataTypeDateTime64>(scale, time_zone);
}
case BinaryTypeIndex::String:
return std::make_shared<DataTypeString>();
case BinaryTypeIndex::FixedString:
{
UInt64 size;
readVarUInt(size, buf);
return std::make_shared<DataTypeFixedString>(size);
}
case BinaryTypeIndex::Enum8:
return decodeEnum<Int8>(buf);
case BinaryTypeIndex::Enum16:
return decodeEnum<Int16>(buf);
case BinaryTypeIndex::Decimal32:
return decodeDecimal<Decimal32>(buf);
case BinaryTypeIndex::Decimal64:
return decodeDecimal<Decimal64>(buf);
case BinaryTypeIndex::Decimal128:
return decodeDecimal<Decimal128>(buf);
case BinaryTypeIndex::Decimal256:
return decodeDecimal<Decimal256>(buf);
case BinaryTypeIndex::UUID:
return std::make_shared<DataTypeUUID>();
case BinaryTypeIndex::Array:
return std::make_shared<DataTypeArray>(decodeDataType(buf));
case BinaryTypeIndex::NamedTuple:
{
size_t size;
readVarUInt(size, buf);
DataTypes elements;
elements.reserve(size);
Names names;
names.reserve(size);
for (size_t i = 0; i != size; ++i)
{
names.emplace_back();
readStringBinary(names.back(), buf);
elements.push_back(decodeDataType(buf));
}
return std::make_shared<DataTypeTuple>(elements, names);
}
case BinaryTypeIndex::UnnamedTuple:
{
size_t size;
readVarUInt(size, buf);
DataTypes elements;
elements.reserve(size);
for (size_t i = 0; i != size; ++i)
elements.push_back(decodeDataType(buf));
return std::make_shared<DataTypeTuple>(elements);
}
case BinaryTypeIndex::Set:
return std::make_shared<DataTypeSet>();
case BinaryTypeIndex::Interval:
{
UInt8 kind;
readBinary(kind, buf);
return std::make_shared<DataTypeInterval>(IntervalKind(IntervalKind::Kind(kind)));
}
case BinaryTypeIndex::Nullable:
return std::make_shared<DataTypeNullable>(decodeDataType(buf));
case BinaryTypeIndex::Function:
{
size_t arguments_size;
readVarUInt(arguments_size, buf);
DataTypes arguments;
arguments.reserve(arguments_size);
for (size_t i = 0; i != arguments_size; ++i)
arguments.push_back(decodeDataType(buf));
auto return_type = decodeDataType(buf);
return std::make_shared<DataTypeFunction>(arguments, return_type);
}
case BinaryTypeIndex::LowCardinality:
return std::make_shared<DataTypeLowCardinality>(decodeDataType(buf));
case BinaryTypeIndex::Map:
{
auto key_type = decodeDataType(buf);
auto value_type = decodeDataType(buf);
return std::make_shared<DataTypeMap>(key_type, value_type);
}
case BinaryTypeIndex::IPv4:
return std::make_shared<DataTypeIPv4>();
case BinaryTypeIndex::IPv6:
return std::make_shared<DataTypeIPv6>();
case BinaryTypeIndex::Variant:
{
size_t size;
readVarUInt(size, buf);
DataTypes variants;
variants.reserve(size);
for (size_t i = 0; i != size; ++i)
variants.push_back(decodeDataType(buf));
return std::make_shared<DataTypeVariant>(variants);
}
case BinaryTypeIndex::Dynamic:
{
UInt8 max_dynamic_types;
readBinary(max_dynamic_types, buf);
return std::make_shared<DataTypeDynamic>(max_dynamic_types);
}
case BinaryTypeIndex::AggregateFunction:
{
size_t version;
readVarUInt(version, buf);
const auto & [function, parameters, arguments_types] = decodeAggregateFunction(buf);
return std::make_shared<DataTypeAggregateFunction>(function, arguments_types, parameters, version);
}
case BinaryTypeIndex::SimpleAggregateFunction:
{
const auto & [function, parameters, arguments_types] = decodeAggregateFunction(buf);
return createSimpleAggregateFunctionType(function, arguments_types, parameters);
}
case BinaryTypeIndex::Nested:
{
size_t size;
readVarUInt(size, buf);
Names names;
names.reserve(size);
DataTypes elements;
elements.reserve(size);
for (size_t i = 0; i != size; ++i)
{
names.emplace_back();
readStringBinary(names.back(), buf);
elements.push_back(decodeDataType(buf));
}
return createNested(elements, names);
}
case BinaryTypeIndex::Custom:
{
String type_name;
readStringBinary(type_name, buf);
return DataTypeFactory::instance().get(type_name);
}
}
throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown type code: {0:#04x}", UInt64(type));
}
DataTypePtr decodeDataType(const String & data)
{
ReadBufferFromString buf(data);
return decodeDataType(buf);
}
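/// Round-trip sketch (illustrative): for any type covered by the encoding table,
/// decoding the encoded representation reconstructs an equivalent type, e.g.:
///     auto type = DataTypeFactory::instance().get("Nullable(FixedString(16))");
///     chassert(decodeDataType(encodeDataType(type))->equals(*type)); /// bytes: 0x23 0x16 0x10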
}

View File

@ -0,0 +1,118 @@
#pragma once
#include <DataTypes/IDataType.h>
namespace DB
{
/**
Binary encoding for ClickHouse data types:
|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| ClickHouse data type | Binary encoding |
|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Nothing | 0x00 |
| UInt8 | 0x01 |
| UInt16 | 0x02 |
| UInt32 | 0x03 |
| UInt64 | 0x04 |
| UInt128 | 0x05 |
| UInt256 | 0x06 |
| Int8 | 0x07 |
| Int16 | 0x08 |
| Int32 | 0x09 |
| Int64 | 0x0A |
| Int128 | 0x0B |
| Int256 | 0x0C |
| Float32 | 0x0D |
| Float64 | 0x0E |
| Date | 0x0F |
| Date32 | 0x10 |
| DateTime | 0x11 |
| DateTime(time_zone) | 0x12<var_uint_time_zone_name_size><time_zone_name_data> |
| DateTime64(P) | 0x13<uint8_precision> |
| DateTime64(P, time_zone) | 0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data> |
| String | 0x15 |
| FixedString(N) | 0x16<var_uint_size> |
| Enum8 | 0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N> |
| Enum16 | 0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...<var_uint_name_size_N><name_data_N><int16_little_endian_value_N> |
| Decimal32(P, S) | 0x19<uint8_precision><uint8_scale> |
| Decimal64(P, S) | 0x1A<uint8_precision><uint8_scale> |
| Decimal128(P, S) | 0x1B<uint8_precision><uint8_scale> |
| Decimal256(P, S) | 0x1C<uint8_precision><uint8_scale> |
| UUID | 0x1D |
| Array(T) | 0x1E<nested_type_encoding> |
| Tuple(T1, ..., TN) | 0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N> |
| Tuple(name1 T1, ..., nameN TN) | 0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N> |
| Set | 0x21 |
| Interval | 0x22<interval_kind> |
| Nullable(T) | 0x23<nested_type_encoding> |
| Function | 0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding> |
| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N> |
| LowCardinality(T) | 0x26<nested_type_encoding> |
| Map(K, V) | 0x27<key_type_encoding><value_type_encoding> |
| IPv4 | 0x28 |
| IPv6 | 0x29 |
| Variant(T1, ..., TN) | 0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N> |
| Dynamic(max_types=N) | 0x2B<uint8_max_types> |
| Custom type (Ring, Polygon, etc) | 0x2C<var_uint_type_name_size><type_name_data> |
| Bool | 0x2D |
| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N> |
| Nested(name1 T1, ..., nameN TN) | 0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N> |
|------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
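
Worked example (derived from the table above): Map(String, UInt64) is encoded as the three
bytes 0x27 0x15 0x04, i.e. the Map code followed by the encodings of its key type (String)
and value type (UInt64).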
Interval kind binary encoding:
|---------------|-----------------|
| Interval kind | Binary encoding |
|---------------|-----------------|
| Nanosecond | 0x00 |
| Microsecond | 0x01 |
| Millisecond | 0x02 |
| Second | 0x03 |
| Minute | 0x04 |
| Hour | 0x05 |
| Day | 0x06 |
| Week | 0x07 |
| Month | 0x08 |
| Quarter | 0x09 |
| Year | 0x0A |
|---------------|-----------------|
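
For example, Interval of kind Week is encoded as 0x22 0x07.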
Aggregate function parameter binary encoding (binary encoding of a Field, see src/Common/FieldBinaryEncoding.h):
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| Parameter type | Binary encoding |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| Null | 0x00 |
| UInt64 | 0x01<var_uint_value> |
| Int64 | 0x02<var_int_value> |
| UInt128 | 0x03<uint128_little_endian_value> |
| Int128 | 0x04<int128_little_endian_value> |
| UInt256 | 0x05<uint256_little_endian_value> |
| Int256 | 0x06<int256_little_endian_value> |
| Float64 | 0x07<float64_little_endian_value> |
| Decimal32 | 0x08<var_uint_scale><int32_little_endian_value> |
| Decimal64 | 0x09<var_uint_scale><int64_little_endian_value> |
| Decimal128 | 0x0A<var_uint_scale><int128_little_endian_value> |
| Decimal256 | 0x0B<var_uint_scale><int256_little_endian_value> |
| String | 0x0C<var_uint_size><data> |
| Array | 0x0D<var_uint_size><value_encoding_1>...<value_encoding_N> |
| Tuple | 0x0E<var_uint_size><value_encoding_1>...<value_encoding_N> |
| Map | 0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N> |
| IPv4 | 0x10<uint32_little_endian_value> |
| IPv6 | 0x11<uint128_little_endian_value> |
| UUID | 0x12<uuid_value> |
| Bool | 0x13<bool_value> |
| Object | 0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N> |
| AggregateFunctionState | 0x15<var_uint_name_size><name_data><var_uint_data_size><data> |
| Negative infinity | 0xFE |
| Positive infinity | 0xFF |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
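
For example, the UInt64 parameter 42 is encoded as 0x01 0x2A (the UInt64 code followed by
var_uint 42), and the String parameter 'ab' as 0x0C 0x02 0x61 0x62.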
*/
String encodeDataType(const DataTypePtr & type);
void encodeDataType(const DataTypePtr & type, WriteBuffer & buf);
DataTypePtr decodeDataType(const String & data);
DataTypePtr decodeDataType(ReadBuffer & buf);
}

Some files were not shown because too many files have changed in this diff.