Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 23:21:59 +00:00)

commit 052520fa03
Merge branch 'master' into hive_style_partitioning
.github/workflows/pull_request.yml

@@ -172,7 +172,7 @@ jobs:
 ################################# Stage Final #################################
 #
   FinishCheck:
-    if: ${{ !cancelled() }}
+    if: ${{ !failure() }}
     needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
@@ -1,5 +1,6 @@
 #pragma once

+#include <cstdlib>
 #include <memory>
 #include <string>

@@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
     using type = std::make_unsigned_t<T>;
 };

+template <> struct make_unsigned<Int8> { using type = UInt8; };
+template <> struct make_unsigned<UInt8> { using type = UInt8; };
+template <> struct make_unsigned<Int16> { using type = UInt16; };
+template <> struct make_unsigned<UInt16> { using type = UInt16; };
+template <> struct make_unsigned<Int32> { using type = UInt32; };
+template <> struct make_unsigned<UInt32> { using type = UInt32; };
+template <> struct make_unsigned<Int64> { using type = UInt64; };
+template <> struct make_unsigned<UInt64> { using type = UInt64; };
 template <> struct make_unsigned<Int128> { using type = UInt128; };
 template <> struct make_unsigned<UInt128> { using type = UInt128; };
 template <> struct make_unsigned<Int256> { using type = UInt256; };

@@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
     using type = std::make_signed_t<T>;
 };

+template <> struct make_signed<Int8> { using type = Int8; };
+template <> struct make_signed<UInt8> { using type = Int8; };
+template <> struct make_signed<Int16> { using type = Int16; };
+template <> struct make_signed<UInt16> { using type = Int16; };
+template <> struct make_signed<Int32> { using type = Int32; };
+template <> struct make_signed<UInt32> { using type = Int32; };
+template <> struct make_signed<Int64> { using type = Int64; };
+template <> struct make_signed<UInt64> { using type = Int64; };
 template <> struct make_signed<Int128> { using type = Int128; };
 template <> struct make_signed<UInt128> { using type = Int128; };
 template <> struct make_signed<Int256> { using type = Int256; };
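These specializations exist because `std::make_unsigned_t` is only defined for built-in integral types, while ClickHouse's wide integers (`Int128`, `UInt256` and friends) are class types. A minimal self-contained sketch of the pattern, with hypothetical stub types standing in for the real wide integers:

```cpp
// Sketch only: Int128Stub/UInt128Stub are hypothetical stand-ins for
// ClickHouse's wide-integer types, which are not standard integral types.
#include <type_traits>

template <typename T>
struct make_unsigned { using type = std::make_unsigned_t<T>; };

struct Int128Stub {};
struct UInt128Stub {};

// Without this specialization, make_unsigned<Int128Stub> would fail to
// instantiate, because std::make_unsigned_t requires an integral type.
template <> struct make_unsigned<Int128Stub> { using type = UInt128Stub; };

static_assert(std::is_same_v<make_unsigned<int>::type, unsigned int>);
static_assert(std::is_same_v<make_unsigned<Int128Stub>::type, UInt128Stub>);
```

Covering the built-in types in the same specialization table keeps `make_unsigned<T>` usable uniformly in generic code.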
base/base/isSharedPtrUnique.h (new file, 9 lines)

@@ -0,0 +1,9 @@
+#pragma once
+
+#include <memory>
+
+template <typename T>
+bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
+{
+    return ptr.use_count() == 1;
+}
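A usage sketch for the new helper (my illustration; the function below just mirrors the header added above). `std::shared_ptr::unique()` was deprecated in C++17 and removed in C++20, so `use_count() == 1` is the remaining way to express this check:

```cpp
// Usage sketch. Note that use_count() is only a snapshot: in concurrent
// code another thread may copy or drop the pointer between the check and
// any action taken on its result.
#include <cassert>
#include <memory>

template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
    return ptr.use_count() == 1;
}

int main()
{
    auto p = std::make_shared<int>(42);
    assert(isSharedPtrUnique(p));   // one owner
    auto q = p;
    assert(!isSharedPtrUnique(p));  // two owners now
}
```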
base/poco/Foundation/include/Poco/Format.h

@@ -232,7 +232,7 @@ void Foundation_API format(
     const Any & value10);


-void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
+void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
     /// Supports a variable number of arguments and is used by
     /// all other variants of format().

base/poco/Foundation/include/Poco/RefCountedObject.h

@@ -21,6 +21,8 @@
 #include "Poco/AtomicCounter.h"
 #include "Poco/Foundation.h"

+#include <atomic>
+

 namespace Poco
 {
base/poco/Foundation/src/Format.cpp

@@ -51,8 +51,8 @@ namespace
        }
        if (width != 0) str.width(width);
    }


    void parsePrec(std::ostream& str, std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
    {
        if (itFmt != endFmt && *itFmt == '.')

@@ -67,7 +67,7 @@ namespace
            if (prec >= 0) str.precision(prec);
        }
    }

    char parseMod(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
    {
        char mod = 0;

@@ -77,13 +77,13 @@ namespace
            {
            case 'l':
            case 'h':
            case 'L':
            case '?': mod = *itFmt++; break;
            }
        }
        return mod;
    }

    std::size_t parseIndex(std::string::const_iterator& itFmt, const std::string::const_iterator& endFmt)
    {
        int index = 0;

@@ -110,8 +110,8 @@ namespace
        case 'f': str << std::fixed; break;
        }
    }


    void writeAnyInt(std::ostream& str, const Any& any)
    {
        if (any.type() == typeid(char))

@@ -201,7 +201,7 @@ namespace
            str << RefAnyCast<std::string>(*itVal++);
            break;
        case 'z':
            str << AnyCast<std::size_t>(*itVal++);
            break;
        case 'I':
        case 'D':

@@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
 {
    std::vector<Any> args;
    args.push_back(value);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    std::vector<Any> args;
    args.push_back(value1);
    args.push_back(value2);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value1);
    args.push_back(value2);
    args.push_back(value3);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value2);
    args.push_back(value3);
    args.push_back(value4);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value3);
    args.push_back(value4);
    args.push_back(value5);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value4);
    args.push_back(value5);
    args.push_back(value6);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value5);
    args.push_back(value6);
    args.push_back(value7);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value6);
    args.push_back(value7);
    args.push_back(value8);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value7);
    args.push_back(value8);
    args.push_back(value9);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }

@@ -420,16 +420,16 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
    args.push_back(value8);
    args.push_back(value9);
    args.push_back(value10);
-    format(result, fmt, args);
+    formatVector(result, fmt, args);
 }


-void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
+void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
 {
    std::string::const_iterator itFmt = fmt.begin();
    std::string::const_iterator endFmt = fmt.end();
    std::vector<Any>::const_iterator itVal = values.begin();
    std::vector<Any>::const_iterator endVal = values.end();
    while (itFmt != endFmt)
    {
        switch (*itFmt)
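One plausible reading of the format/formatVector split, as an assumption rather than something stated in the commit: Poco::Any converts implicitly from almost any type, so an overload set where one `format` takes `const Any&` and another takes `const std::vector<Any>&` is easy to misuse, and giving the vector overload its own name makes every call site explicit. A self-contained sketch with a simplified Any:

```cpp
// Sketch (assumption about the motivation): an Any-style type converts
// from anything, so if the vector overload is absent or not found, passing
// a std::vector<Any> still compiles and the whole vector is wrapped into a
// single Any. A distinct name makes the intended call unambiguous.
#include <iostream>
#include <string>
#include <vector>

struct Any { template <typename T> Any(const T &) {} };

void format(std::string & out, const std::string & fmt, const Any &)
{
    out = fmt + " [formatted one Any]";
}

void formatVector(std::string & out, const std::string & fmt, const std::vector<Any> & values)
{
    out = fmt + " [formatted " + std::to_string(values.size()) + " values]";
}

int main()
{
    std::string out;
    std::vector<Any> args = {Any(1), Any(2)};
    format(out, "%d %d", args);       // compiles, but wraps args in ONE Any
    std::cout << out << '\n';
    formatVector(out, "%d %d", args); // explicit and correct
    std::cout << out << '\n';
}
```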
base/poco/MongoDB/src/ObjectId.cpp

@@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const

    for (int i = 0; i < 12; ++i)
    {
-        s += format(fmt, (unsigned int) _id[i]);
+        s += Poco::format(fmt, (unsigned int) _id[i]);
    }
    return s;
 }
base/poco/MongoDB/src/OpMsgCursor.cpp

@@ -43,9 +43,9 @@ namespace Poco {
 namespace MongoDB {


-static const std::string keyCursor {"cursor"};
-static const std::string keyFirstBatch {"firstBatch"};
-static const std::string keyNextBatch {"nextBatch"};
+[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
+[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
+[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};

 static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);

@@ -131,7 +131,7 @@ OpMsgMessage& OpMsgCursor::next(Connection& connection)
        connection.readResponse(_response);
    }
    else
 #endif
    {
        _response.clear();
        _query.setCursor(_cursorID, _batchSize);
base/poco/Net/include/Poco/Net/NameValueCollection.h

@@ -79,7 +79,7 @@ namespace Net
        /// Returns the value of the first name-value pair with the given name.
        /// If no value with the given name has been found, the defaultValue is returned.

-    const std::vector<std::reference_wrapper<const std::string>> getAll(const std::string & name) const;
+    std::vector<std::string> getAll(const std::string & name) const;
        /// Returns all values of all name-value pairs with the given name.
        ///
        /// Returns an empty vector if there are no name-value pairs with the given name.
base/poco/Net/src/HTTPMessage.cpp

@@ -17,9 +17,9 @@
 #include "Poco/NumberFormatter.h"
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"
-#include <charconv>
+#include <format>


 using Poco::NumberFormatter;
 using Poco::NumberParser;
 using Poco::icompare;

@@ -75,7 +75,7 @@ void HTTPMessage::setContentLength(std::streamsize length)
        erase(CONTENT_LENGTH);
 }


 std::streamsize HTTPMessage::getContentLength() const
 {
    const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);

@@ -98,7 +98,7 @@ void HTTPMessage::setContentLength64(Poco::Int64 length)
        erase(CONTENT_LENGTH);
 }


 Poco::Int64 HTTPMessage::getContentLength64() const
 {
    const std::string& contentLength = get(CONTENT_LENGTH, EMPTY);

@@ -133,13 +133,13 @@ void HTTPMessage::setChunkedTransferEncoding(bool flag)
        setTransferEncoding(IDENTITY_TRANSFER_ENCODING);
 }


 bool HTTPMessage::getChunkedTransferEncoding() const
 {
    return icompare(getTransferEncoding(), CHUNKED_TRANSFER_ENCODING) == 0;
 }


 void HTTPMessage::setContentType(const std::string& mediaType)
 {
    if (mediaType.empty())

@@ -154,7 +154,7 @@ void HTTPMessage::setContentType(const MediaType& mediaType)
        setContentType(mediaType.toString());
 }


 const std::string& HTTPMessage::getContentType() const
 {
    return get(CONTENT_TYPE, UNKNOWN_CONTENT_TYPE);
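The include swap above hints that number formatting in this file moved from `<charconv>` to C++20 `std::format`; the snippet below is only my side-by-side illustration of the two styles, not the actual Poco code:

```cpp
// Illustrative only: formatting an integral header value with std::format
// versus std::to_chars. std::format returns a std::string directly, while
// to_chars needs an explicit buffer and error handling.
#include <charconv>
#include <format>
#include <iostream>
#include <string>

int main()
{
    long long length = 123456;

    std::string viaFormat = std::format("{}", length);

    char buf[32];
    auto [ptr, ec] = std::to_chars(buf, buf + sizeof(buf), length);
    std::string viaToChars(buf, ptr);

    std::cout << viaFormat << ' ' << viaToChars << '\n';
}
```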
base/poco/Net/src/NameValueCollection.cpp

@@ -102,9 +102,9 @@ const std::string& NameValueCollection::get(const std::string& name, const std::
    return defaultValue;
 }

-const std::vector<std::reference_wrapper<const std::string>> NameValueCollection::getAll(const std::string& name) const
+std::vector<std::string> NameValueCollection::getAll(const std::string& name) const
 {
-    std::vector<std::reference_wrapper<const std::string>> values;
+    std::vector<std::string> values;
    for (ConstIterator it = _map.find(name); it != _map.end(); it++)
        if (it->first == name)
            values.push_back(it->second);
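Both the header and this implementation drop `std::reference_wrapper` in favor of returning copies. A sketch of the hazard that avoids (my example, not Poco code): references into a container stay valid only while the container keeps those elements alive:

```cpp
// Copies returned by value stay usable; reference_wrappers into a
// container dangle as soon as the container is modified or destroyed.
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    std::multimap<std::string, std::string> headers{{"Set-Cookie", "a=1"},
                                                    {"Set-Cookie", "b=2"}};

    std::vector<std::string> safe;  // copies: always valid
    for (auto [it, end] = headers.equal_range("Set-Cookie"); it != end; ++it)
        safe.push_back(it->second);

    headers.clear();                // reference_wrappers would dangle here
    for (const auto & v : safe)
        std::cout << v << '\n';     // still fine
}
```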
contrib/avro (vendored)

@@ -1 +1 @@
-Subproject commit d43acc84d3d455b016f847d6666fbc3cd27f16a9
+Subproject commit 545e7002683cbc2198164d93088ac8e4955b4628
contrib/grpc (vendored)

@@ -1 +1 @@
-Subproject commit 77b2737a709d43d8c6895e3f03ca62b00bd9201c
+Subproject commit f5b7fdc2dff09ada06dbf6c75df298fb40f898df
contrib/libpq-cmake/CMakeLists.txt

@@ -54,7 +54,6 @@ set(SRCS
    "${LIBPQ_SOURCE_DIR}/port/pgstrcasecmp.c"
    "${LIBPQ_SOURCE_DIR}/port/thread.c"
    "${LIBPQ_SOURCE_DIR}/port/path.c"
-    "${LIBPQ_SOURCE_DIR}/port/explicit_bzero.c"
 )

 add_library(_libpq ${SRCS})
docker/test/fasttest/run.sh

@@ -284,6 +284,11 @@ function run_tests
        NPROC=1
    fi

+    export CLICKHOUSE_CONFIG_DIR=$FASTTEST_DATA
+    export CLICKHOUSE_CONFIG="$FASTTEST_DATA/config.xml"
+    export CLICKHOUSE_USER_FILES="$FASTTEST_DATA/user_files"
+    export CLICKHOUSE_SCHEMA_FILES="$FASTTEST_DATA/format_schemas"
+
    local test_opts=(
        --hung-check
        --fast-tests-only
docker/test/stateless/run.sh

@@ -9,19 +9,28 @@ set -e -x -a
 MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
 MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))

 USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
 USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}

+# disable for now
+RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
+
+if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+    RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
+fi
+
 # Choose random timezone for this test run.
 #
 # NOTE: that clickhouse-test will randomize session_timezone by itself as well
 # (it will choose between default server timezone and something specific).
 TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
-echo "Choosen random timezone $TZ"
+echo "Chosen random timezone $TZ"
 ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

 dpkg -i package_folder/clickhouse-common-static_*.deb
 dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
-# Accept failure in the next two commands until 24.4 is released (for compatibility and Bugfix validation run)
-dpkg -i package_folder/clickhouse-odbc-bridge_*.deb || true
-dpkg -i package_folder/clickhouse-library-bridge_*.deb || true
+dpkg -i package_folder/clickhouse-odbc-bridge_*.deb
+dpkg -i package_folder/clickhouse-library-bridge_*.deb
 dpkg -i package_folder/clickhouse-server_*.deb
 dpkg -i package_folder/clickhouse-client_*.deb

@@ -58,12 +67,6 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; then
    rm /etc/clickhouse-server/users.d/s3_cache_new.xml
    rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
-
-    #todo: remove these after 24.3 released.
-    sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml
-
-    #todo: remove these after 24.3 released.
-    sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

 function remove_keeper_config()
 {
    sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml

@@ -96,10 +99,57 @@ if [ "$NUM_TRIES" -gt "1" ]; then
    mkdir -p /var/run/clickhouse-server
 fi

+# Run a CH instance to execute sequential tests on it in parallel with all other tests.
+if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+    mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
+    cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/
+
+    sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
+    sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*
+
+    function replace(){
+        sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
+    }
+
+    replace "s|<port>9000</port>|<port>19000</port>|g"
+    replace "s|<port>9440</port>|<port>19440</port>|g"
+    replace "s|<port>9988</port>|<port>19988</port>|g"
+    replace "s|<port>9234</port>|<port>19234</port>|g"
+    replace "s|<port>9181</port>|<port>19181</port>|g"
+    replace "s|<https_port>8443</https_port>|<https_port>18443</https_port>|g"
+    replace "s|<tcp_port>9000</tcp_port>|<tcp_port>19000</tcp_port>|g"
+    replace "s|<tcp_port>9181</tcp_port>|<tcp_port>19181</tcp_port>|g"
+    replace "s|<tcp_port_secure>9440</tcp_port_secure>|<tcp_port_secure>19440</tcp_port_secure>|g"
+    replace "s|<tcp_with_proxy_port>9010</tcp_with_proxy_port>|<tcp_with_proxy_port>19010</tcp_with_proxy_port>|g"
+    replace "s|<mysql_port>9004</mysql_port>|<mysql_port>19004</mysql_port>|g"
+    replace "s|<postgresql_port>9005</postgresql_port>|<postgresql_port>19005</postgresql_port>|g"
+    replace "s|<interserver_http_port>9009</interserver_http_port>|<interserver_http_port>19009</interserver_http_port>|g"
+    replace "s|8123|18123|g"
+    replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
+    replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
+    # distributed cache
+    replace "s|<tcp_port>10001</tcp_port>|<tcp_port>10003</tcp_port>|g"
+    replace "s|<tcp_port>10002</tcp_port>|<tcp_port>10004</tcp_port>|g"
+
+    sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
+    --pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
+    -- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
+    --logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
+    --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
+    --prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
+    --mysql_port 19004 --postgresql_port 19005
+
+    for _ in {1..100}
+    do
+        clickhouse-client --port 19000 --query "SELECT 1" && break
+        sleep 1
+    done
+fi
+
 # simplest way to forward env variables to server
 sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid

-if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml

@@ -136,7 +186,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
 fi

-if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
    | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
    > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp

@@ -216,15 +266,15 @@ function run_tests()
        ADDITIONAL_OPTIONS+=('--no-random-merge-tree-settings')
    fi

-    if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--shared-catalog')
    fi

-    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        ADDITIONAL_OPTIONS+=('--replicated-database')
        # Too many tests fail for DatabaseReplicated in parallel.
        ADDITIONAL_OPTIONS+=('--jobs')
-        ADDITIONAL_OPTIONS+=('2')
+        ADDITIONAL_OPTIONS+=('3')
    elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
        # Coverage on a per-test basis could only be collected sequentially.
        # Do not set the --jobs parameter.

@@ -232,7 +282,11 @@ function run_tests()
    else
        # All other configurations are OK.
        ADDITIONAL_OPTIONS+=('--jobs')
-        ADDITIONAL_OPTIONS+=('8')
+        ADDITIONAL_OPTIONS+=('5')
    fi

+    if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+        ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
+    fi
+
    if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then

@@ -256,7 +310,7 @@ function run_tests()
    try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

    set +e
-    timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+    clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
        --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
        | ts '%Y-%m-%d %H:%M:%S' \
        | tee -a test_output/test_result.txt

@@ -267,7 +321,7 @@ export -f run_tests


 # This should be enough to setup job and collect artifacts
-TIMEOUT=$((MAX_RUN_TIME - 300))
+TIMEOUT=$((MAX_RUN_TIME - 600))
 if [ "$NUM_TRIES" -gt "1" ]; then
    # We don't run tests with Ordinary database in PRs, only in master.
    # So run new/changed tests with Ordinary at least once in flaky check.

@@ -296,7 +350,7 @@ do
    err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
    echo "$err"
    [[ "0" != "${#err}" ]] && failed_to_save_logs=1
-    if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+    if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1

@@ -305,7 +359,7 @@ do
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1
    fi

-    if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+    if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
        err=$( { clickhouse-client --port 19000 -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst; } 2>&1 )
        echo "$err"
        [[ "0" != "${#err}" ]] && failed_to_save_logs=1

@@ -316,12 +370,17 @@ done
 # Why do we read data with clickhouse-local?
 # Because it's the simplest way to read it when server has crashed.
 sudo clickhouse stop ||:
-if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+
+if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+    sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
+fi
+
+if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
    sudo clickhouse stop --pid-path /var/run/clickhouse-server2 ||:
 fi

-if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
 fi

@@ -329,6 +388,12 @@ rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server.log ||:
 rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
 zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &

+if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+    rg -Fa "<Fatal>" /var/log/clickhouse-server3/clickhouse-server.log ||:
+    rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
+    zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
+fi
+
 data_path_config="--path=/var/lib/clickhouse/"
 if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
    # We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)

@@ -348,12 +413,17 @@ if [ $failed_to_save_logs -ne 0 ]; then
    for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
    do
        clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
-        if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+
+        if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+            clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
+        fi
+
+        if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
            clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
        fi

-        if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+        if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
            clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
        fi
    done

@@ -389,7 +459,14 @@ rm -rf /var/lib/clickhouse/data/system/*/
 tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
 tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:

-if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
+if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
+    rm -rf /var/lib/clickhouse3/data/system/*/
+    tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
+    tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
+fi
+
+if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
    rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
    zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:

@@ -400,7 +477,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
    tar -chf /test_output/coordination2.tar /var/lib/clickhouse2/coordination ||:
 fi

-if [[ -n "$USE_SHARED_CATALOG" ]] && [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
+if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
    rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
    zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server1.log > /test_output/clickhouse-server1.log.zst ||:
    mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
docs/en/engines/table-engines/integrations/s3queue.md

@@ -75,7 +75,7 @@ SETTINGS
 Possible values:

 - unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper.
-- ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.
+- ordered — With ordered mode, the files are processed in lexicographic order. It means that if file named 'BBB' was processed at some point and later on a file named 'AA' is added to the bucket, it will be ignored. Only the max name (in lexicographic sense) of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.

 Default value: `ordered` in versions before 24.6. Starting with 24.6 there is no default value, the setting becomes required to be specified manually. For tables created on earlier versions the default value will remain `Ordered` for compatibility.
|
@ -1535,6 +1535,10 @@ the columns from input data will be mapped to the columns from the table by thei
|
||||
Otherwise, the first row will be skipped.
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
|
||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||
If setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
|
||||
the types in header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes output format.
|
||||
If setting [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) is set to 1,
|
||||
the types in header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes input format.
|
||||
:::
|
||||
|
||||
## RowBinaryWithDefaults {#rowbinarywithdefaults}
|
||||
|
docs/en/operations/opentelemetry.md

@@ -2,15 +2,11 @@
 slug: /en/operations/opentelemetry
 sidebar_position: 62
 sidebar_label: Tracing ClickHouse with OpenTelemetry
-title: "[experimental] Tracing ClickHouse with OpenTelemetry"
+title: "Tracing ClickHouse with OpenTelemetry"
 ---

 [OpenTelemetry](https://opentelemetry.io/) is an open standard for collecting traces and metrics from the distributed application. ClickHouse has some support for OpenTelemetry.

-:::note
-This is an experimental feature that will change in backwards-incompatible ways in future releases.
-:::
-
 ## Supplying Trace Context to ClickHouse

 ClickHouse accepts trace context HTTP headers, as described by the [W3C recommendation](https://www.w3.org/TR/trace-context/). It also accepts trace context over a native protocol that is used for communication between ClickHouse servers or between the client and server. For manual testing, trace context headers conforming to the Trace Context recommendation can be supplied to `clickhouse-client` using `--opentelemetry-traceparent` and `--opentelemetry-tracestate` flags.
docs/en/operations/settings/settings-formats.md

@@ -1951,6 +1951,18 @@ The maximum allowed size for String in RowBinary format. It prevents allocating

 Default value: `1GiB`.

+### output_format_binary_encode_types_in_binary_format {#output_format_binary_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes output format.
+
+Disabled by default.
+
+### input_format_binary_decode_types_in_binary_format {#input_format_binary_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes input format.
+
+Disabled by default.
+
 ## Native format settings {#native-format-settings}

 ### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}

@@ -1958,3 +1970,15 @@ Default value: `1GiB`.
 Allow types conversion in Native input format between columns from input data and requested columns.

 Enabled by default.
+
+### output_format_native_encode_types_in_binary_format {#output_format_native_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native output format.
+
+Disabled by default.
+
+### input_format_native_decode_types_in_binary_format {#input_format_native_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native input format.
+
+Disabled by default.
docs/en/operations/settings/settings.md

@@ -1170,6 +1170,10 @@ Data in the VALUES clause of INSERT queries is processed by a separate stream pa

 Default value: 262144 (= 256 KiB).

+:::note
+`max_query_size` cannot be set within an SQL query (e.g., `SELECT now() SETTINGS max_query_size=10000`) because ClickHouse needs to allocate a buffer to parse the query, and this buffer size is determined by the `max_query_size` setting, which must be configured before the query is executed.
+:::
+
 ## max_parser_depth {#max_parser_depth}

 Limits maximum recursion depth in the recursive descent parser. Allows controlling the stack size.

@@ -1354,12 +1358,25 @@ Connection pool size for PostgreSQL table engine and database engine.

 Default value: 16

+## postgresql_connection_attempt_timeout {#postgresql-connection-attempt-timeout}
+
+Connection timeout in seconds of a single attempt to connect PostgreSQL end-point.
+The value is passed as a `connect_timeout` parameter of the connection URL.
+
+Default value: `2`.
+
 ## postgresql_connection_pool_wait_timeout {#postgresql-connection-pool-wait-timeout}

 Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.

 Default value: 5000

+## postgresql_connection_pool_retries {#postgresql-connection-pool-retries}
+
+The maximum number of retries to establish a connection with the PostgreSQL end-point.
+
+Default value: `2`.
+
 ## postgresql_connection_pool_auto_close_connection {#postgresql-connection-pool-auto-close-connection}

 Close connection before returning connection to the pool.
docs/en/sql-reference/aggregate-functions/reference/maxmap.md

@@ -5,23 +5,45 @@ sidebar_position: 165

 # maxMap

-Syntax: `maxMap(key, value)` or `maxMap(Tuple(key, value))`
-
 Calculates the maximum from `value` array according to the keys specified in the `key` array.

-Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
+**Syntax**

-The number of elements in `key` and `value` must be the same for each row that is totaled.
+```sql
+maxMap(key, value)
+```
+or
+```sql
+maxMap(Tuple(key, value))
+```

-Returns a tuple of two arrays: keys and values calculated for the corresponding keys.
-
 Alias: `maxMappedArrays`

-Example:
+:::note
+- Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
+- The number of elements in `key` and `value` must be the same for each row that is totaled.
+:::
+
+**Parameters**
+
+- `key` — Array of keys. [Array](../../data-types/array.md).
+- `value` — Array of values. [Array](../../data-types/array.md).
+
+**Returned value**
+
+- Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys. [Tuple](../../data-types/tuple.md)([Array](../../data-types/array.md), [Array](../../data-types/array.md)).
+
+**Example**
+
+Query:

 ``` sql
 SELECT maxMap(a, b)
 FROM values('a Array(Char), b Array(Int64)', (['x', 'y'], [2, 2]), (['y', 'z'], [3, 1]))
 ```

 Result:

 ``` text
 ┌─maxMap(a, b)───────────┐
 │ [['x','y','z'],[2,3,1]]│
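To make the documented semantics concrete, here is a small sketch of the aggregation maxMap describes: per key, keep the maximum value across rows. This is an illustration of the behavior, not ClickHouse internals:

```cpp
// Merge (key, value) pairs from every row, keeping the maximum value per
// key; keys come out sorted because std::map keeps them ordered.
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main()
{
    // Two "rows", mirroring the docs example:
    // (['x','y'], [2,2]) and (['y','z'], [3,1])
    std::vector<std::pair<std::vector<std::string>, std::vector<int>>> rows = {
        {{"x", "y"}, {2, 2}},
        {{"y", "z"}, {3, 1}},
    };

    std::map<std::string, int> acc;
    for (const auto & [keys, values] : rows)
        for (size_t i = 0; i < keys.size(); ++i)
        {
            auto [it, inserted] = acc.emplace(keys[i], values[i]);
            if (!inserted)
                it->second = std::max(it->second, values[i]);
        }

    for (const auto & [k, v] : acc)
        std::cout << k << " -> " << v << '\n';  // x -> 2, y -> 3, z -> 1
}
```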
docs/en/sql-reference/aggregate-functions/reference/minmap.md

@@ -5,23 +5,45 @@ sidebar_position: 169

 # minMap

-Syntax: `minMap(key, value)` or `minMap(Tuple(key, value))`
-
 Calculates the minimum from `value` array according to the keys specified in the `key` array.

-Passing a tuple of keys and value arrays is identical to passing two arrays of keys and values.
+**Syntax**

-The number of elements in `key` and `value` must be the same for each row that is totaled.
+```sql
+minMap(key, value)
+```
+or
+```sql
+minMap(Tuple(key, value))
+```

-Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys.
-
 Alias: `minMappedArrays`

-Example:
+:::note
+- Passing a tuple of keys and value arrays is identical to passing an array of keys and an array of values.
+- The number of elements in `key` and `value` must be the same for each row that is totaled.
+:::
+
+**Parameters**
+
+- `key` — Array of keys. [Array](../../data-types/array.md).
+- `value` — Array of values. [Array](../../data-types/array.md).
+
+**Returned value**
+
+- Returns a tuple of two arrays: keys in sorted order, and values calculated for the corresponding keys. [Tuple](../../data-types/tuple.md)([Array](../../data-types/array.md), [Array](../../data-types/array.md)).
+
+**Example**
+
+Query:

 ``` sql
 SELECT minMap(a, b)
 FROM values('a Array(Int32), b Array(Int64)', ([1, 2], [2, 2]), ([2, 3], [1, 1]))
 ```

 Result:

 ``` text
 ┌─minMap(a, b)──────┐
 │ ([1,2,3],[2,1,1]) │
docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md

@@ -16,7 +16,7 @@ singleValueOrNull(x)

 **Parameters**

-- `x` — Column of any [data type](../../data-types/index.md).
+- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md) or [Tuple](../../data-types/tuple) which cannot be of type [Nullable](../../data-types/nullable.md)).

 **Returned values**
docs/en/sql-reference/data-types/data-types-binary-encoding.md (new file, 115 lines)

@@ -0,0 +1,115 @@
+---
+slug: /en/sql-reference/data-types/data-types-binary-encoding
+sidebar_position: 56
+sidebar_label: Data types binary encoding specification.
+---
+
+# Data types binary encoding specification
+
+This specification describes the binary format that can be used for binary encoding and decoding of ClickHouse data types. This format is used in `Dynamic` column [binary serialization](dynamic.md#binary-output-format) and can be used in input/output formats [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) and [Native](../../interfaces/formats.md#native) under corresponding settings.
+
+The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type and some optional additional information.
+`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.
+
+| ClickHouse data type | Binary encoding |
+|----------------------|-----------------|
+| `Nothing` | `0x00` |
+| `UInt8` | `0x01` |
+| `UInt16` | `0x02` |
+| `UInt32` | `0x03` |
+| `UInt64` | `0x04` |
+| `UInt128` | `0x05` |
+| `UInt256` | `0x06` |
+| `Int8` | `0x07` |
+| `Int16` | `0x08` |
+| `Int32` | `0x09` |
+| `Int64` | `0x0A` |
+| `Int128` | `0x0B` |
+| `Int256` | `0x0C` |
+| `Float32` | `0x0D` |
+| `Float64` | `0x0E` |
+| `Date` | `0x0F` |
+| `Date32` | `0x10` |
+| `DateTime` | `0x11` |
+| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
+| `DateTime64(P)` | `0x13<uint8_precision>` |
+| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
+| `String` | `0x15` |
+| `FixedString(N)` | `0x16<var_uint_size>` |
+| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
+| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...<var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
+| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
+| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
+| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
+| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
+| `UUID` | `0x1D` |
+| `Array(T)` | `0x1E<nested_type_encoding>` |
+| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
+| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
+| `Set` | `0x21` |
+| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
+| `Nullable(T)` | `0x23<nested_type_encoding>` |
+| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
+| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
+| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
+| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
+| `IPv4` | `0x28` |
+| `IPv6` | `0x29` |
+| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
+| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
+| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
+| `Bool` | `0x2D` |
+| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
+| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
+
+### Interval kind binary encoding
+
+The table below describes how different interval kinds of `Interval` data type are encoded.
+
+| Interval kind | Binary encoding |
+|---------------|-----------------|
+| `Nanosecond` | `0x00` |
+| `Microsecond` | `0x01` |
+| `Millisecond` | `0x02` |
+| `Second` | `0x03` |
+| `Minute` | `0x04` |
+| `Hour` | `0x05` |
+| `Day` | `0x06` |
+| `Week` | `0x07` |
+| `Month` | `0x08` |
+| `Quarter` | `0x09` |
+| `Year` | `0x0A` |
+
+### Aggregate function parameter binary encoding
+
+The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
+The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.
+
+| Parameter type | Binary encoding |
+|----------------|-----------------|
+| `Null` | `0x00` |
+| `UInt64` | `0x01<var_uint_value>` |
+| `Int64` | `0x02<var_int_value>` |
+| `UInt128` | `0x03<uint128_little_endian_value>` |
+| `Int128` | `0x04<int128_little_endian_value>` |
+| `UInt256` | `0x05<uint256_little_endian_value>` |
+| `Int256` | `0x06<int256_little_endian_value>` |
+| `Float64` | `0x07<float64_little_endian_value>` |
+| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
+| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
+| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
+| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
+| `String` | `0x0C<var_uint_size><data>` |
+| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
+| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
+| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
+| `IPv4` | `0x10<uint32_little_endian_value>` |
+| `IPv6` | `0x11<uint128_little_endian_value>` |
+| `UUID` | `0x12<uuid_value>` |
+| `Bool` | `0x13<bool_value>` |
+| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
+| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
+| `Negative infinity` | `0xFE` |
+| `Positive infinity` | `0xFF` |
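The var_uint encoding the specification leans on is, assuming ClickHouse's usual VarUInt convention, a little-endian base-128 variable-length quantity: seven payload bits per byte, high bit set while more bytes follow. A minimal sketch:

```cpp
// Sketch of a LEB128-style var_uint encoder, assuming the usual
// little-endian base-128 convention.
#include <cstdint>
#include <vector>

std::vector<uint8_t> encodeVarUInt(uint64_t x)
{
    std::vector<uint8_t> out;
    while (x >= 0x80)
    {
        out.push_back(static_cast<uint8_t>(x) | 0x80); // continuation bit set
        x >>= 7;
    }
    out.push_back(static_cast<uint8_t>(x));            // final byte, high bit clear
    return out;
}
```

Under that assumption, a `FixedString(300)` type header would be the type byte `0x16` followed by `encodeVarUInt(300)`, i.e. the bytes `0xAC 0x02`.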
docs/en/sql-reference/data-types/dynamic.md

@@ -493,3 +493,14 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
 ```

 As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
+
+### Binary output format
+
+In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
+
+```text
+<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
+```
+
+See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md)
@ -12,9 +12,7 @@ Functions for [searching](string-search-functions.md) in strings and for [replac
|
||||
|
||||
## empty
|
||||
|
||||
Checks whether the input string is empty.
|
||||
|
||||
A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
|
||||
Checks whether the input string is empty. A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
|
||||
|
||||
The function is also available for [arrays](array-functions.md#function-empty) and [UUIDs](uuid-functions.md#empty).
|
||||
|
||||
@ -48,9 +46,7 @@ Result:
|
||||
|
||||
## notEmpty
|
||||
|
||||
Checks whether the input string is non-empty.
|
||||
|
||||
A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
|
||||
Checks whether the input string is non-empty. A string is considered non-empty if it contains at least one byte, even if this byte is a space or the null byte.
|
||||
|
||||
The function is also available for [arrays](array-functions.md#function-notempty) and [UUIDs](uuid-functions.md#notempty).
|
||||
|
||||
@ -96,7 +92,7 @@ length(s)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: An input string or array. [String](../data-types/string)/[Array](../data-types/array).
|
||||
- `s` — An input string or array. [String](../data-types/string)/[Array](../data-types/array).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -149,7 +145,7 @@ lengthUTF8(s)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: String containing valid UTF-8 encoded text. [String](../data-types/string).
|
||||
- `s` — String containing valid UTF-8 encoded text. [String](../data-types/string).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -183,8 +179,8 @@ left(s, offset)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
- `s` — The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset` — The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -230,8 +226,8 @@ leftUTF8(s, offset)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
- `s` — The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset` — The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -347,8 +343,8 @@ right(s, offset)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
- `s` — The string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset` — The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -394,8 +390,8 @@ rightUTF8(s, offset)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `s`: The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset`: The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
- `s` — The UTF-8 encoded string to calculate a substring from. [String](../data-types/string.md) or [FixedString](../data-types/fixedstring.md).
|
||||
- `offset` — The number of bytes of the offset. [UInt*](../data-types/int-uint).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -547,7 +543,7 @@ Alias: `ucase`
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `input`: A string type [String](../data-types/string.md).
|
||||
- `input` — A string type [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -571,16 +567,47 @@ SELECT upper('clickhouse');
|
||||
|
||||
Converts a string to lowercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||
|
||||
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
|
||||
:::note
|
||||
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I). If the length of the UTF-8 byte sequence is different for upper and lower case of a code point (such as `ẞ` and `ß`), the result may be incorrect for this code point.
|
||||
:::
|
||||
|
||||
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
|
||||
**Syntax**
|
||||
|
||||
```sql
|
||||
lowerUTF8(input)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `input` — A string type [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- A [String](../data-types/string.md) data type value.
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT lowerUTF8('MÜNCHEN') as Lowerutf8;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` response
|
||||
┌─Lowerutf8─┐
|
||||
│ münchen │
|
||||
└───────────┘
|
||||
```
|
||||
|
||||
## upperUTF8
|
||||
|
||||
Converts a string to uppercase, assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.
|
||||
|
||||
|
||||
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
|
||||
:::note
|
||||
Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I). If the length of the UTF-8 byte sequence is different for upper and lower case of a code point (such as `ẞ` and `ß`), the result may be incorrect for this code point.
|
||||
:::
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -590,7 +617,7 @@ upperUTF8(input)
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `input`: A string type [String](../data-types/string.md).
|
||||
- `input` — A string type [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
@ -604,6 +631,8 @@ Query:
|
||||
SELECT upperUTF8('München') as Upperutf8;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` response
|
||||
┌─Upperutf8─┐
|
||||
│ MÜNCHEN │
|
||||
@ -614,6 +643,34 @@ SELECT upperUTF8('München') as Upperutf8;
|
||||
|
||||
Returns 1, if the set of bytes constitutes valid UTF-8-encoded text, otherwise 0.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
isValidUTF8(input)
|
||||
```
|
||||
|
||||
**Parameters**
|
||||
|
||||
- `input` — A string type [String](../data-types/string.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Returns `1`, if the set of bytes constitutes valid UTF-8-encoded text, otherwise `0`.
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT isValidUTF8('\xc3\xb1') AS valid, isValidUTF8('\xc3\x28') AS invalid;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` response
|
||||
┌─valid─┬─invalid─┐
|
||||
│ 1 │ 0 │
|
||||
└───────┴─────────┘
|
||||
```
|
||||
|
||||
## toValidUTF8
|
||||
|
||||
Replaces invalid UTF-8 characters by the `<60>` (U+FFFD) character. All running in a row invalid characters are collapsed into the one replacement character.
@ -883,7 +940,7 @@ Returns the substring of a string `s` which starts at the specified byte index `

substring(s, offset[, length])
```

Alias:
Aliases:
- `substr`
- `mid`
- `byteSlice`
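A quick usage sketch (offsets are 1-based; these queries are illustrative, not from the original page):

```sql
SELECT substring('ClickHouse', 6) AS tail, substring('ClickHouse', 1, 5) AS head;
```

This should return `House` and `Click`.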
@ -926,9 +983,9 @@ substringUTF8(s, offset[, length])

**Arguments**

- `s`: The string to calculate a substring from. [String](../data-types/string.md), [FixedString](../data-types/fixedstring.md) or [Enum](../data-types/enum.md)
- `offset`: The starting position of the substring in `s` . [(U)Int*](../data-types/int-uint.md).
- `length`: The maximum length of the substring. [(U)Int*](../data-types/int-uint.md). Optional.
- `s` — The string to calculate a substring from. [String](../data-types/string.md), [FixedString](../data-types/fixedstring.md) or [Enum](../data-types/enum.md)
- `offset` — The starting position of the substring in `s`. [(U)Int*](../data-types/int-uint.md).
- `length` — The maximum length of the substring. [(U)Int*](../data-types/int-uint.md). Optional.

**Returned value**

@ -964,9 +1021,9 @@ Alias: `SUBSTRING_INDEX`

**Arguments**

- s: The string to extract substring from. [String](../data-types/string.md).
- delim: The character to split. [String](../data-types/string.md).
- count: The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
- s — The string to extract substring from. [String](../data-types/string.md).
- delim — The character to split. [String](../data-types/string.md).
- count — The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)

**Example**

@ -995,9 +1052,9 @@ substringIndexUTF8(s, delim, count)

**Arguments**

- `s`: The string to extract substring from. [String](../data-types/string.md).
- `delim`: The character to split. [String](../data-types/string.md).
- `count`: The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)
- `s` — The string to extract substring from. [String](../data-types/string.md).
- `delim` — The character to split. [String](../data-types/string.md).
- `count` — The number of occurrences of the delimiter to count before extracting the substring. If count is positive, everything to the left of the final delimiter (counting from the left) is returned. If count is negative, everything to the right of the final delimiter (counting from the right) is returned. [UInt or Int](../data-types/int-uint.md)

**Returned value**

@ -1277,7 +1334,7 @@ tryBase64Decode(encoded)

**Arguments**

- `encoded`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, returns an empty string.
- `encoded` — [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value, returns an empty string.
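A short illustration of the fallback behavior (an illustrative query, not from the original page):

```sql
SELECT
    tryBase64Decode('Y2xpY2tob3VzZQ==') AS decoded, -- 'clickhouse'
    tryBase64Decode('invalid!') AS failed;          -- '' (invalid input yields an empty string)
```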
**Returned value**

@ -1309,7 +1366,7 @@ tryBase64URLDecode(encodedUrl)

**Parameters**

- `encodedURL`: [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, returns an empty string.
- `encodedURL` — [String](../data-types/string.md) column or constant. If the string is not a valid Base64-encoded value with URL-specific modifications, returns an empty string.
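As a sketch, decoding a URL-safe value without padding versus a malformed one (illustrative values):

```sql
SELECT
    tryBase64URLDecode('aHR0cHM6Ly9jbGlja2hvdXNlLmNvbQ') AS decoded, -- 'https://clickhouse.com'
    tryBase64URLDecode('invalid~') AS failed;                        -- ''
```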
**Returned value**

@ -2011,7 +2068,7 @@ soundex(val)

**Arguments**

- `val` - Input value. [String](../data-types/string.md)
- `val` — Input value. [String](../data-types/string.md)
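For instance (an illustrative query; the classic Soundex algorithm maps `Robert` to `R163`):

```sql
SELECT soundex('Robert') AS code;
```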
**Returned value**

@ -2044,7 +2101,7 @@ punycodeEncode(val)

**Arguments**

- `val` - Input value. [String](../data-types/string.md)
- `val` — Input value. [String](../data-types/string.md)
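A brief sketch, using a well-known Punycode sample word:

```sql
SELECT punycodeEncode('München') AS puny; -- 'Mnchen-3ya'
```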
**Returned value**

@ -2077,7 +2134,7 @@ punycodeEncode(val)

**Arguments**

- `val` - Punycode-encoded string. [String](../data-types/string.md)
- `val` — Punycode-encoded string. [String](../data-types/string.md)
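And the inverse operation, as a sketch:

```sql
SELECT punycodeDecode('Mnchen-3ya') AS plain; -- 'München'
```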
**Returned value**

@ -2103,7 +2160,7 @@ Like `punycodeDecode` but returns an empty string if no valid Punycode-encoded s

## idnaEncode

Returns the the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
Returns the ASCII representation (ToASCII algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
The input string must be UTF-8 encoded and translatable to an ASCII string, otherwise an exception is thrown.
Note: No percent decoding or trimming of tabs, spaces or control characters is performed.

@ -2115,7 +2172,7 @@ idnaEncode(val)

**Arguments**

- `val` - Input value. [String](../data-types/string.md)
- `val` — Input value. [String](../data-types/string.md)
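For example (an illustrative domain, reusing the Punycode sample above):

```sql
SELECT idnaEncode('münchen.de') AS ascii_repr; -- 'xn--mnchen-3ya.de'
```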
**Returned value**

@ -2141,7 +2198,7 @@ Like `idnaEncode` but returns an empty string in case of an error instead of thr

## idnaDecode

Returns the the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
Returns the Unicode (UTF-8) representation (ToUnicode algorithm) of a domain name according to the [Internationalized Domain Names in Applications](https://en.wikipedia.org/wiki/Internationalized_domain_name#Internationalizing_Domain_Names_in_Applications) (IDNA) mechanism.
In case of an error (e.g. because the input is invalid), the input string is returned.
Note that repeated application of `idnaEncode()` and `idnaDecode()` does not necessarily return the original string due to case normalization.

@ -2153,7 +2210,7 @@ idnaDecode(val)

**Arguments**

- `val` - Input value. [String](../data-types/string.md)
- `val` — Input value. [String](../data-types/string.md)
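And the inverse, as a sketch:

```sql
SELECT idnaDecode('xn--mnchen-3ya.de') AS unicode_repr; -- 'münchen.de'
```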
**Returned value**

@ -2197,7 +2254,7 @@ Result:

└───────────────────────────────────────────┘
```

Alias: mismatches
Alias: `mismatches`

## stringJaccardIndex

@ -2251,7 +2308,7 @@ Result:

└─────────────────────────────────────┘
```

Alias: levenshteinDistance
Alias: `levenshteinDistance`

## editDistanceUTF8

@ -2277,7 +2334,7 @@ Result:

└─────────────────────────────────────┘
```

Alias: levenshteinDistanceUTF8
Alias: `levenshteinDistanceUTF8`

## damerauLevenshteinDistance

@ -2355,13 +2412,93 @@ Result:

Convert the first letter of each word to upper case and the rest to lower case. Words are sequences of alphanumeric characters separated by non-alphanumeric characters.

:::note
Because `initCap` converts only the first letter of each word to upper case, you may observe unexpected behaviour for words containing apostrophes or capital letters. For example:

```sql
SELECT initCap('mother''s daughter'), initCap('joe McAdam');
```

will return

```response
┌─initCap('mother\'s daughter')─┬─initCap('joe McAdam')─┐
│ Mother'S Daughter │ Joe Mcadam │
└───────────────────────────────┴───────────────────────┘
```

This is a known behaviour, with no plans currently to fix it.
:::

**Syntax**

```sql
initcap(val)
```

**Arguments**

- `val` — Input value. [String](../data-types/string.md).

**Returned value**

- `val` with the first letter of each word converted to upper case. [String](../data-types/string.md).

**Example**

Query:

```sql
SELECT initcap('building for fast');
```

Result:

```text
┌─initcap('building for fast')─┐
│ Building For Fast │
└──────────────────────────────┘
```
## initcapUTF8

Like [initcap](#initcap), assuming that the string contains valid UTF-8 encoded text. If this assumption is violated, no exception is thrown and the result is undefined.

Does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
Like [initcap](#initcap), `initcapUTF8` converts the first letter of each word to upper case and the rest to lower case. Assumes that the string contains valid UTF-8 encoded text.
If this assumption is violated, no exception is thrown and the result is undefined.

:::note
This function does not detect the language, e.g. for Turkish the result might not be exactly correct (i/İ vs. i/I).
If the length of the UTF-8 byte sequence is different for upper and lower case of a code point, the result may be incorrect for this code point.
:::

**Syntax**

```sql
initcapUTF8(val)
```

**Arguments**

- `val` — Input value. [String](../data-types/string.md).

**Returned value**

- `val` with the first letter of each word converted to upper case. [String](../data-types/string.md).

**Example**

Query:

```sql
SELECT initcapUTF8('не тормозит');
```

Result:

```text
┌─initcapUTF8('не тормозит')─┐
│ Не Тормозит │
└────────────────────────────┘
```
## firstLine

@ -2375,7 +2512,7 @@ firstLine(val)

**Arguments**

- `val` - Input value. [String](../data-types/string.md)
- `val` — Input value. [String](../data-types/string.md)

**Returned value**

@ -34,7 +34,7 @@ Alias: `replace`.

Replaces the first occurrence of the substring matching the regular expression `pattern` (in [re2 syntax](https://github.com/google/re2/wiki/Syntax)) in `haystack` by the `replacement` string.

`replacement` can containing substitutions `\0-\9`.
`replacement` can contain substitutions `\0-\9`.
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch), substitution `\0` corresponds to the entire match.

To use a verbatim `\` character in the `pattern` or `replacement` strings, escape it using `\`.
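As a sketch of capturing-group substitutions (an illustrative date reformatting; backslashes are doubled inside SQL string literals):

```sql
SELECT replaceRegexpOne('2024-07-04', '(\\d{4})-(\\d{2})-(\\d{2})', '\\3/\\2/\\1') AS reformatted; -- '04/07/2024'
```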
@ -7,7 +7,7 @@ sidebar_label: Tuples

## tuple

A function that allows grouping multiple columns.
For columns with the types T1, T2, ..., it returns a Tuple(T1, T2, ...) type tuple containing these columns. There is no cost to execute the function.
For columns C1, C2, ... with the types T1, T2, ..., it returns a named Tuple(C1 T1, C2 T2, ...) type tuple containing these columns if their names are unique and can be treated as unquoted identifiers, otherwise a Tuple(T1, T2, ...) is returned. There is no cost to execute the function.
Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can’t be written to a table.

The function implements the operator `(x, y, ...)`.
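Under the naming rule described above, one would expect the column names to surface in the tuple type. A minimal sketch (the column names `a` and `b` are hypothetical, and the expected type follows from the rule rather than from the original page):

```sql
SELECT toTypeName(tuple(a, b)) FROM (SELECT 1 AS a, 'x' AS b); -- expected: Tuple(a UInt8, b String)
```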
@ -259,6 +259,60 @@ Result:

└───────────────────────────────────────┘
```

## tupleNames

Converts a tuple into an array of column names. For a tuple in the form `Tuple(a T, b T, ...)`, it returns an array of strings representing the named columns of the tuple. If the tuple elements do not have explicit names, their indices will be used as the column names instead.

**Syntax**

``` sql
tupleNames(tuple)
```

**Arguments**

- `tuple` — Named tuple. [Tuple](../../sql-reference/data-types/tuple.md) with any types of values.

**Returned value**

- An array with strings.

Type: [Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md)).

**Example**

Query:

``` sql
CREATE TABLE tupletest (col Tuple(user_ID UInt64, session_ID UInt64)) ENGINE = Memory;

INSERT INTO tupletest VALUES (tuple(1, 2));

SELECT tupleNames(col) FROM tupletest;
```

Result:

``` text
┌─tupleNames(col)──────────┐
│ ['user_ID','session_ID'] │
└──────────────────────────┘
```

If you pass a simple tuple to the function, ClickHouse uses the indexes of the columns as their names:

``` sql
SELECT tupleNames(tuple(3, 2, 1));
```

Result:

``` text
┌─tupleNames((3, 2, 1))─┐
│ ['1','2','3'] │
└───────────────────────┘
```

## tuplePlus

Calculates the sum of corresponding values of two tuples of the same size.

@ -130,7 +130,9 @@ SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv');

## Globs in path

Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.
Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix. There is one exception: if the path refers to an existing directory and does not use globs, a `*` is implicitly appended to the path so that all the files in the directory are selected.

- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.

@ -163,6 +165,12 @@ An alternative path expression which achieves the same:

SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32');
```

Query the total number of rows in `some_dir` using the implicit `*`:

```sql
SELECT count(*) FROM file('some_dir', 'TSV', 'name String, value UInt32');
```

:::note
If your listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
:::

@ -12,7 +12,7 @@ sidebar_label: "Отличительные возможности ClickHouse"

Этот пункт пришлось выделить, так как существуют системы, которые могут хранить значения отдельных столбцов по отдельности, но не могут эффективно выполнять аналитические запросы в силу оптимизации под другой сценарий работы. Примеры: HBase, BigTable, Cassandra, HyperTable. В этих системах вы получите пропускную способность в районе сотен тысяч строк в секунду, но не сотен миллионов строк в секунду.

Также стоит заметить, что ClickHouse является системой управления базами данных, а не системой для одной базой данных. То есть, ClickHouse позволяет создавать таблицы и базы данных во время выполнения (runtime), загружать данные и выполнять запросы без переконфигурирования и перезапуска сервера.
Также стоит заметить, что ClickHouse является системой управления базами данных, а не системой для одной базы данных. То есть, ClickHouse позволяет создавать таблицы и базы данных во время выполнения (runtime), загружать данные и выполнять запросы без переконфигурирования и перезапуска сервера.

## Сжатие данных {#szhatie-dannykh}
@ -1117,6 +1117,7 @@ void Client::processOptions(const OptionsDescription & options_description,

if (!options["user"].defaulted())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User and JWT flags can't be specified together");
config().setString("jwt", options["jwt"].as<std::string>());
config().setString("user", "");
}
if (options.count("accept-invalid-certificate"))
{
@ -10,6 +10,7 @@

#include <Analyzer/Utils.h>

#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypesNumber.h>

namespace DB
{

@ -26,12 +27,103 @@ static constexpr std::array boolean_functions{

"like"sv, "notLike"sv, "ilike"sv, "notILike"sv, "empty"sv, "notEmpty"sv, "not"sv, "and"sv,
"or"sv};

static bool isBooleanFunction(const String & func_name)

bool isBooleanFunction(const String & func_name)
{
return std::any_of(
boolean_functions.begin(), boolean_functions.end(), [&](const auto boolean_func) { return func_name == boolean_func; });
}

bool isNodeFunction(const QueryTreeNodePtr & node, const String & func_name)
{
if (const auto * function_node = node->as<FunctionNode>())
return function_node->getFunctionName() == func_name;
return false;
}

QueryTreeNodePtr getFunctionArgument(const QueryTreeNodePtr & node, size_t idx)
{
if (const auto * function_node = node->as<FunctionNode>())
{
const auto & args = function_node->getArguments().getNodes();
if (idx < args.size())
return args[idx];
}
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected '{}' to be a function with at least {} arguments", node->formatASTForErrorMessage(), idx + 1);
}

QueryTreeNodePtr findEqualsFunction(const QueryTreeNodes & nodes)
{
for (const auto & node : nodes)
{
const auto * function_node = node->as<FunctionNode>();
if (function_node && function_node->getFunctionName() == "equals" &&
function_node->getArguments().getNodes().size() == 2)
{
return node;
}
}
return nullptr;
}

bool isBooleanConstant(const QueryTreeNodePtr & node, bool expected_value)
{
const auto * constant_node = node->as<ConstantNode>();
if (!constant_node || !constant_node->getResultType()->equals(DataTypeUInt8()))
return false;

UInt64 constant_value;
return (constant_node->getValue().tryGet<UInt64>(constant_value) && constant_value == expected_value);
}

/// Returns true if expression consists of only conjunctions of functions with the specified name or true constants
bool isOnlyConjunctionOfFunctions(
const QueryTreeNodePtr & node,
const String & func_name,
const QueryTreeNodePtrWithHashSet & allowed_arguments)
{
if (isBooleanConstant(node, true))
return true;

const auto * node_function = node->as<FunctionNode>();
if (!node_function)
return false;

if (node_function->getFunctionName() == func_name
&& allowed_arguments.contains(node_function->getArgumentsNode()))
return true;

if (node_function->getFunctionName() == "and")
{
for (const auto & and_argument : node_function->getArguments().getNodes())
{
if (!isOnlyConjunctionOfFunctions(and_argument, func_name, allowed_arguments))
return false;
}
return true;
}
return false;
}

/// We can rewrite to a <=> b only if we are joining on a and b,
/// because the function is not yet implemented for other cases.
bool isTwoArgumentsFromDifferentSides(const FunctionNode & node_function, const JoinNode & join_node)
{
const auto & argument_nodes = node_function.getArguments().getNodes();
if (argument_nodes.size() != 2)
return false;

auto first_src = getExpressionSource(argument_nodes[0]);
auto second_src = getExpressionSource(argument_nodes[1]);
if (!first_src || !second_src)
return false;

const auto & lhs_join = *join_node.getLeftTableExpression();
const auto & rhs_join = *join_node.getRightTableExpression();
return (first_src->isEqual(lhs_join) && second_src->isEqual(rhs_join)) ||
(first_src->isEqual(rhs_join) && second_src->isEqual(lhs_join));
}

/// Visitor that optimizes logical expressions _only_ in JOIN ON section
class JoinOnLogicalExpressionOptimizerVisitor : public InDepthQueryTreeVisitorWithContext<JoinOnLogicalExpressionOptimizerVisitor>
{

@ -47,15 +139,16 @@ public:

{
auto * function_node = node->as<FunctionNode>();

if (!function_node)
return;
QueryTreeNodePtr new_node = nullptr;
if (function_node && function_node->getFunctionName() == "or")
new_node = tryOptimizeJoinOnNulls(function_node->getArguments().getNodes(), getContext());
else
new_node = tryOptimizeJoinOnNulls({node}, getContext());

if (function_node->getFunctionName() == "or")
if (new_node)
{
bool is_argument_type_changed = tryOptimizeIsNotDistinctOrIsNull(node, getContext());
if (is_argument_type_changed)
need_rerun_resolve = true;
return;
need_rerun_resolve |= !new_node->getResultType()->equals(*node->getResultType());
node = new_node;
}
}

@ -72,15 +165,11 @@ private:

const JoinNode * join_node;
bool need_rerun_resolve = false;

/// Returns true if type of some operand is changed and parent function needs to be re-resolved
bool tryOptimizeIsNotDistinctOrIsNull(QueryTreeNodePtr & node, const ContextPtr & context)
/// Returns the optimized node, or nullptr if nothing has been changed
QueryTreeNodePtr tryOptimizeJoinOnNulls(const QueryTreeNodes & nodes, const ContextPtr & context)
{
auto & function_node = node->as<FunctionNode &>();
chassert(function_node.getFunctionName() == "or");

QueryTreeNodes or_operands;
or_operands.reserve(function_node.getArguments().getNodes().size());
or_operands.reserve(nodes.size());

/// Indices of `equals` or `isNotDistinctFrom` functions in the vector above
std::vector<size_t> equals_functions_indices;

@ -93,47 +182,73 @@ private:

* b => [(a IS NULL AND b IS NULL)]
* c => [(a IS NULL AND c IS NULL)]
* }
* Then for each a <=> b we can find all operands that contains both a IS NULL and b IS NULL
* Then for each equality a = b we can check if we have operand (a IS NULL AND b IS NULL)
*/
QueryTreeNodePtrWithHashMap<std::vector<size_t>> is_null_argument_to_indices;

for (const auto & argument : function_node.getArguments())
{
or_operands.push_back(argument);
bool is_anything_changed = false;

auto * argument_function = argument->as<FunctionNode>();
for (const auto & node : nodes)
{
if (isBooleanConstant(node, false))
{
/// Remove false constants from OR
is_anything_changed = true;
continue;
}

or_operands.push_back(node);
auto * argument_function = node->as<FunctionNode>();
if (!argument_function)
continue;

const auto & func_name = argument_function->getFunctionName();
if (func_name == "equals" || func_name == "isNotDistinctFrom")
{
const auto & argument_nodes = argument_function->getArguments().getNodes();
if (argument_nodes.size() != 2)
continue;
/// We can rewrite to a <=> b only if we are joining on a and b,
/// because the function is not yet implemented for other cases.
auto first_src = getExpressionSource(argument_nodes[0]);
auto second_src = getExpressionSource(argument_nodes[1]);
if (!first_src || !second_src)
continue;
const auto & lhs_join = *join_node->getLeftTableExpression();
const auto & rhs_join = *join_node->getRightTableExpression();
bool arguments_from_both_sides = (first_src->isEqual(lhs_join) && second_src->isEqual(rhs_join)) ||
(first_src->isEqual(rhs_join) && second_src->isEqual(lhs_join));
if (!arguments_from_both_sides)
continue;
equals_functions_indices.push_back(or_operands.size() - 1);
if (isTwoArgumentsFromDifferentSides(*argument_function, *join_node))
equals_functions_indices.push_back(or_operands.size() - 1);
}
else if (func_name == "and")
{
for (const auto & and_argument : argument_function->getArguments().getNodes())
const auto & and_arguments = argument_function->getArguments().getNodes();
bool all_are_is_null = and_arguments.size() == 2 && isNodeFunction(and_arguments[0], "isNull") && isNodeFunction(and_arguments[1], "isNull");
if (all_are_is_null)
{
auto * and_argument_function = and_argument->as<FunctionNode>();
if (and_argument_function && and_argument_function->getFunctionName() == "isNull")
is_null_argument_to_indices[getFunctionArgument(and_arguments.front(), 0)].push_back(or_operands.size() - 1);
is_null_argument_to_indices[getFunctionArgument(and_arguments.back(), 0)].push_back(or_operands.size() - 1);
}

/// The expression `a = b AND (a IS NOT NULL) AND true AND (b IS NOT NULL)` can be replaced with `a = b`.
/// Even though these expressions are not equivalent (the first is NULL on NULLs, while the second is FALSE),
/// it is still correct since for a JOIN ON condition NULL is treated as FALSE
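/// Illustrative query (not from the original patch): for `t1 JOIN t2 ON t1.key = t2.key AND t1.key IS NOT NULL AND t2.key IS NOT NULL`
/// the ON condition reduces to `t1.key = t2.key` under these semantics.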
if (const auto & equals_function = findEqualsFunction(and_arguments))
{
const auto & equals_arguments = equals_function->as<FunctionNode>()->getArguments().getNodes();
/// Expected isNotNull arguments
QueryTreeNodePtrWithHashSet allowed_arguments;
allowed_arguments.insert(QueryTreeNodePtrWithHash(std::make_shared<ListNode>(QueryTreeNodes{equals_arguments[0]})));
allowed_arguments.insert(QueryTreeNodePtrWithHash(std::make_shared<ListNode>(QueryTreeNodes{equals_arguments[1]})));

bool can_be_optimized = true;
for (const auto & and_argument : and_arguments)
{
const auto & is_null_argument = and_argument_function->getArguments().getNodes()[0];
is_null_argument_to_indices[is_null_argument].push_back(or_operands.size() - 1);
if (and_argument.get() == equals_function.get())
continue;

if (isOnlyConjunctionOfFunctions(and_argument, "isNotNull", allowed_arguments))
continue;

can_be_optimized = false;
break;
}

if (can_be_optimized)
{
is_anything_changed = true;
or_operands.pop_back();
or_operands.push_back(equals_function);
if (isTwoArgumentsFromDifferentSides(equals_function->as<FunctionNode &>(), *join_node))
equals_functions_indices.push_back(or_operands.size() - 1);
}
}
}

@ -144,9 +259,9 @@ private:

for (size_t equals_function_idx : equals_functions_indices)
{
auto * equals_function = or_operands[equals_function_idx]->as<FunctionNode>();
const auto * equals_function = or_operands[equals_function_idx]->as<FunctionNode>();

/// For a <=> b we are looking for expressions containing both `a IS NULL` and `b IS NULL` combined with AND
/// For a = b we are looking for all expressions `a IS NULL AND b IS NULL`
const auto & argument_nodes = equals_function->getArguments().getNodes();
const auto & lhs_is_null_parents = is_null_argument_to_indices[argument_nodes[0]];
const auto & rhs_is_null_parents = is_null_argument_to_indices[argument_nodes[1]];

@ -161,60 +276,40 @@ private:

for (size_t to_optimize_idx : operands_to_optimize)
{
/// We are looking for operand `a IS NULL AND b IS NULL AND ...`
auto * operand_to_optimize = or_operands[to_optimize_idx]->as<FunctionNode>();

/// Remove `a IS NULL` and `b IS NULL` arguments from AND
QueryTreeNodes new_arguments;
for (const auto & and_argument : operand_to_optimize->getArguments().getNodes())
{
bool to_eliminate = false;

const auto * and_argument_function = and_argument->as<FunctionNode>();
if (and_argument_function && and_argument_function->getFunctionName() == "isNull")
{
const auto & is_null_argument = and_argument_function->getArguments().getNodes()[0];
to_eliminate = (is_null_argument->isEqual(*argument_nodes[0]) || is_null_argument->isEqual(*argument_nodes[1]));
}

if (to_eliminate)
arguments_to_reresolve.insert(to_optimize_idx);
else
new_arguments.emplace_back(and_argument);
}
/// If less than two arguments left, we will remove or replace the whole AND below
operand_to_optimize->getArguments().getNodes() = std::move(new_arguments);
/// Remove `a IS NULL AND b IS NULL`
or_operands[to_optimize_idx] = nullptr;
is_anything_changed = true;
}
}

if (arguments_to_reresolve.empty())
if (arguments_to_reresolve.empty() && !is_anything_changed)
/// Nothing has been changed
return false;
return nullptr;

auto and_function_resolver = FunctionFactory::instance().get("and", context);
auto strict_equals_function_resolver = FunctionFactory::instance().get("isNotDistinctFrom", context);

bool need_reresolve = false;
QueryTreeNodes new_or_operands;
for (size_t i = 0; i < or_operands.size(); ++i)
{
if (arguments_to_reresolve.contains(i))
{
auto * function = or_operands[i]->as<FunctionNode>();
const auto * function = or_operands[i]->as<FunctionNode>();
if (function->getFunctionName() == "equals")
{
/// We should replace `a = b` with `a <=> b` because we removed checks for IS NULL
need_reresolve |= function->getResultType()->isNullable();
function->resolveAsFunction(strict_equals_function_resolver);
new_or_operands.emplace_back(std::move(or_operands[i]));
auto new_function = or_operands[i]->clone();
new_function->as<FunctionNode>()->resolveAsFunction(strict_equals_function_resolver);
new_or_operands.emplace_back(std::move(new_function));
}
else if (function->getFunctionName() == "and")
{
const auto & and_arguments = function->getArguments().getNodes();
if (and_arguments.size() > 1)
{
function->resolveAsFunction(and_function_resolver);
new_or_operands.emplace_back(std::move(or_operands[i]));
auto new_function = or_operands[i]->clone();
new_function->as<FunctionNode>()->resolveAsFunction(and_function_resolver);
new_or_operands.emplace_back(std::move(new_function));
}
else if (and_arguments.size() == 1)
{

@ -223,25 +318,26 @@ private:

}
}
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected function name: '{}'", function->getFunctionName());
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected function '{}'", function->getFunctionName());
}
else
else if (or_operands[i])
{
new_or_operands.emplace_back(std::move(or_operands[i]));
}
}

if (new_or_operands.empty())
return nullptr;

if (new_or_operands.size() == 1)
{
node = std::move(new_or_operands[0]);
return need_reresolve;
}
return new_or_operands[0];

/// Rebuild OR function
auto or_function_resolver = FunctionFactory::instance().get("or", context);
function_node.getArguments().getNodes() = std::move(new_or_operands);
function_node.resolveAsFunction(or_function_resolver);
return need_reresolve;
auto function_node = std::make_shared<FunctionNode>("or");
function_node->getArguments().getNodes() = std::move(new_or_operands);
function_node->resolveAsFunction(or_function_resolver);
return function_node;
}
};
@ -940,6 +940,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildJoinTree(const ASTPtr & tables_in_select

table_join.locality,
result_join_strictness,
result_join_kind);
join_node->setOriginalAST(table_element.table_join);

/** Original AST is not set because it will contain only join part and does
* not include left table expression.

@ -3,6 +3,8 @@

#include <Backups/BackupStatus.h>
#include <Common/ProfileEvents.h>

#include <exception>

namespace DB
{

@ -4,7 +4,9 @@

#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/FieldToDataType.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Processors/Transforms/ColumnGathererTransform.h>

@ -481,7 +483,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co

/// We cannot use Variant serialization here as it serializes discriminator + value,
/// but Dynamic doesn't have fixed mapping discriminator <-> variant type
/// as different Dynamic columns can have different Variants.
/// Instead, we serialize null bit + variant type name (size + bytes) + value.
/// Instead, we serialize null bit + variant type in binary format (size + bytes) + value.
const auto & variant_col = assert_cast<const ColumnVariant &>(*variant_column);
auto discr = variant_col.globalDiscriminatorAt(n);
StringRef res;

@ -495,14 +497,15 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co

return res;
}

const auto & variant_name = variant_info.variant_names[discr];
size_t variant_name_size = variant_name.size();
char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_name.size(), begin);
const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(discr);
String variant_type_binary_data = encodeDataType(variant_type);
size_t variant_type_binary_data_size = variant_type_binary_data.size();
char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(), begin);
memcpy(pos, &null_bit, sizeof(UInt8));
memcpy(pos + sizeof(UInt8), &variant_name_size, sizeof(size_t));
memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_name.data(), variant_name.size());
memcpy(pos + sizeof(UInt8), &variant_type_binary_data_size, sizeof(size_t));
memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_type_binary_data.data(), variant_type_binary_data.size());
res.data = pos;
res.size = sizeof(UInt8) + sizeof(size_t) + variant_name.size();
res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size();

auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin);
res.data = value_ref.data - res.size;

@ -521,13 +524,15 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)

return pos;
}

/// Read variant type name.
const size_t variant_name_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_name_size);
String variant_name;
variant_name.resize(variant_name_size);
memcpy(variant_name.data(), pos, variant_name_size);
pos += variant_name_size;
/// Read variant type in binary format.
const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_type_binary_data_size);
String variant_type_binary_data;
variant_type_binary_data.resize(variant_type_binary_data_size);
memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
pos += variant_type_binary_data_size;
auto variant_type = decodeDataType(variant_type_binary_data);
auto variant_name = variant_type->getName();
/// If we already have such variant, just deserialize it into corresponding variant column.
auto it = variant_info.variant_name_to_discriminator.find(variant_name);
if (it != variant_info.variant_name_to_discriminator.end())

@ -537,7 +542,6 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)

}

/// If we don't have such variant, add it.
auto variant_type = DataTypeFactory::instance().get(variant_name);
if (likely(addNewVariant(variant_type)))
{
auto discr = variant_info.variant_name_to_discriminator[variant_name];

@ -563,13 +567,13 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const

if (null_bit)
return pos;

const size_t variant_name_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_name_size);
String variant_name;
variant_name.resize(variant_name_size);
memcpy(variant_name.data(), pos, variant_name_size);
pos += variant_name_size;
auto tmp_variant_column = DataTypeFactory::instance().get(variant_name)->createColumn();
const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
pos += sizeof(variant_type_binary_data_size);
String variant_type_binary_data;
variant_type_binary_data.resize(variant_type_binary_data_size);
memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
pos += variant_type_binary_data_size;
auto tmp_variant_column = decodeDataType(variant_type_binary_data)->createColumn();
return tmp_variant_column->skipSerializedInArena(pos);
}

@ -1,5 +1,7 @@

#pragma once

#include <atomic>
#include <mutex>
#include <memory>

#include <base/types.h>

@ -1,5 +1,6 @@

#pragma once

#include <atomic>
#include <list>
#include <memory>
#include <mutex>
389
src/Common/FieldBinaryEncoding.cpp
Normal file
389
src/Common/FieldBinaryEncoding.cpp
Normal file
@ -0,0 +1,389 @@

#include <Common/FieldBinaryEncoding.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>

namespace DB
{

namespace ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
extern const int INCORRECT_DATA;
}

namespace
{

enum class FieldBinaryTypeIndex: uint8_t
{
Null = 0x00,
UInt64 = 0x01,
Int64 = 0x02,
UInt128 = 0x03,
Int128 = 0x04,
UInt256 = 0x05,
Int256 = 0x06,
Float64 = 0x07,
Decimal32 = 0x08,
Decimal64 = 0x09,
Decimal128 = 0x0A,
Decimal256 = 0x0B,
String = 0x0C,
Array = 0x0D,
Tuple = 0x0E,
Map = 0x0F,
IPv4 = 0x10,
IPv6 = 0x11,
UUID = 0x12,
Bool = 0x13,
Object = 0x14,
AggregateFunctionState = 0x15,

NegativeInfinity = 0xFE,
PositiveInfinity = 0xFF,
};

class FieldVisitorEncodeBinary
{
public:
void operator() (const Null & x, WriteBuffer & buf) const;
void operator() (const UInt64 & x, WriteBuffer & buf) const;
void operator() (const UInt128 & x, WriteBuffer & buf) const;
void operator() (const UInt256 & x, WriteBuffer & buf) const;
void operator() (const Int64 & x, WriteBuffer & buf) const;
void operator() (const Int128 & x, WriteBuffer & buf) const;
void operator() (const Int256 & x, WriteBuffer & buf) const;
void operator() (const UUID & x, WriteBuffer & buf) const;
void operator() (const IPv4 & x, WriteBuffer & buf) const;
void operator() (const IPv6 & x, WriteBuffer & buf) const;
void operator() (const Float64 & x, WriteBuffer & buf) const;
void operator() (const String & x, WriteBuffer & buf) const;
void operator() (const Array & x, WriteBuffer & buf) const;
void operator() (const Tuple & x, WriteBuffer & buf) const;
void operator() (const Map & x, WriteBuffer & buf) const;
void operator() (const Object & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const;
void operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const;
void operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const;
[[noreturn]] void operator() (const CustomType & x, WriteBuffer & buf) const;
void operator() (const bool & x, WriteBuffer & buf) const;
};

void FieldVisitorEncodeBinary::operator() (const Null & x, WriteBuffer & buf) const
{
if (x.isNull())
writeBinary(UInt8(FieldBinaryTypeIndex::Null), buf);
else if (x.isPositiveInfinity())
writeBinary(UInt8(FieldBinaryTypeIndex::PositiveInfinity), buf);
else if (x.isNegativeInfinity())
writeBinary(UInt8(FieldBinaryTypeIndex::NegativeInfinity), buf);
}

void FieldVisitorEncodeBinary::operator() (const UInt64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt64), buf);
writeVarUInt(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const Int64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int64), buf);
writeVarInt(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const Float64 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Float64), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const String & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::String), buf);
writeStringBinary(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const UInt128 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt128), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const Int128 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int128), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const UInt256 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UInt256), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const Int256 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Int256), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const UUID & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::UUID), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const IPv4 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::IPv4), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const IPv6 & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::IPv6), buf);
writeBinaryLittleEndian(x, buf);
}

void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal32), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}

void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal64), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}

void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal128), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}

void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Decimal256), buf);
writeVarUInt(x.getScale(), buf);
writeBinaryLittleEndian(x.getValue(), buf);
}

void FieldVisitorEncodeBinary::operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::AggregateFunctionState), buf);
writeStringBinary(x.name, buf);
writeStringBinary(x.data, buf);
}

void FieldVisitorEncodeBinary::operator() (const Array & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Array), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
}

void FieldVisitorEncodeBinary::operator() (const Tuple & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Tuple), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
}

void FieldVisitorEncodeBinary::operator() (const Map & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Map), buf);
size_t size = x.size();
writeVarUInt(size, buf);
for (size_t i = 0; i < size; ++i)
{
const Tuple & key_and_value = x[i].get<Tuple>();
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[0]);
Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[1]);
}
}

void FieldVisitorEncodeBinary::operator() (const Object & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Object), buf);

size_t size = x.size();
writeVarUInt(size, buf);
for (const auto & [key, value] : x)
{
writeStringBinary(key, buf);
Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, value);
}
}

void FieldVisitorEncodeBinary::operator()(const bool & x, WriteBuffer & buf) const
{
writeBinary(UInt8(FieldBinaryTypeIndex::Bool), buf);
writeBinary(static_cast<UInt8>(x), buf);
}

[[noreturn]] void FieldVisitorEncodeBinary::operator()(const CustomType &, WriteBuffer &) const
{
/// TODO: Support binary encoding/decoding for custom types somehow.
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of Field with custom type is not supported");
}

template <typename T>
Field decodeBigInteger(ReadBuffer & buf)
{
T value;
readBinaryLittleEndian(value, buf);
return value;
}

template <typename T>
DecimalField<T> decodeDecimal(ReadBuffer & buf)
{
UInt32 scale;
readVarUInt(scale, buf);
T value;
readBinaryLittleEndian(value, buf);
return DecimalField<T>(value, scale);
}

template <typename T>
T decodeValueLittleEndian(ReadBuffer & buf)
{
T value;
readBinaryLittleEndian(value, buf);
return value;
}

template <typename T>
T decodeArrayLikeField(ReadBuffer & buf)
{
size_t size;
readVarUInt(size, buf);
T value;
for (size_t i = 0; i != size; ++i)
value.push_back(decodeField(buf));
return value;
}

}

void encodeField(const Field & x, WriteBuffer & buf)
{
Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, x);
}

Field decodeField(ReadBuffer & buf)
{
UInt8 type;
readBinary(type, buf);
switch (FieldBinaryTypeIndex(type))
{
case FieldBinaryTypeIndex::Null:
return Null();
case FieldBinaryTypeIndex::PositiveInfinity:
return POSITIVE_INFINITY;
case FieldBinaryTypeIndex::NegativeInfinity:
return NEGATIVE_INFINITY;
case FieldBinaryTypeIndex::Int64:
{
Int64 value;
readVarInt(value, buf);
return value;
}
case FieldBinaryTypeIndex::UInt64:
{
UInt64 value;
readVarUInt(value, buf);
return value;
}
case FieldBinaryTypeIndex::Int128:
return decodeBigInteger<Int128>(buf);
case FieldBinaryTypeIndex::UInt128:
return decodeBigInteger<UInt128>(buf);
case FieldBinaryTypeIndex::Int256:
return decodeBigInteger<Int256>(buf);
case FieldBinaryTypeIndex::UInt256:
return decodeBigInteger<UInt256>(buf);
case FieldBinaryTypeIndex::Float64:
return decodeValueLittleEndian<Float64>(buf);
case FieldBinaryTypeIndex::Decimal32:
return decodeDecimal<Decimal32>(buf);
case FieldBinaryTypeIndex::Decimal64:
return decodeDecimal<Decimal64>(buf);
case FieldBinaryTypeIndex::Decimal128:
return decodeDecimal<Decimal128>(buf);
case FieldBinaryTypeIndex::Decimal256:
return decodeDecimal<Decimal256>(buf);
case FieldBinaryTypeIndex::String:
{
String value;
readStringBinary(value, buf);
return value;
}
case FieldBinaryTypeIndex::UUID:
return decodeValueLittleEndian<UUID>(buf);
case FieldBinaryTypeIndex::IPv4:
return decodeValueLittleEndian<IPv4>(buf);
case FieldBinaryTypeIndex::IPv6:
return decodeValueLittleEndian<IPv6>(buf);
case FieldBinaryTypeIndex::Bool:
{
bool value;
readBinary(value, buf);
return value;
}
case FieldBinaryTypeIndex::Array:
return decodeArrayLikeField<Array>(buf);
case FieldBinaryTypeIndex::Tuple:
return decodeArrayLikeField<Tuple>(buf);
case FieldBinaryTypeIndex::Map:
{
size_t size;
readVarUInt(size, buf);
Map map;
for (size_t i = 0; i != size; ++i)
{
Tuple key_and_value;
key_and_value.push_back(decodeField(buf));
key_and_value.push_back(decodeField(buf));
map.push_back(key_and_value);
}
return map;
}
case FieldBinaryTypeIndex::Object:
{
size_t size;
readVarUInt(size, buf);
Object value;
for (size_t i = 0; i != size; ++i)
{
String name;
readStringBinary(name, buf);
value[name] = decodeField(buf);
}
return value;
}
case FieldBinaryTypeIndex::AggregateFunctionState:
{
String name;
readStringBinary(name, buf);
String data;
readStringBinary(data, buf);
return AggregateFunctionStateData{.name = name, .data = data};
}
}

throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown Field type: {0:#04x}", UInt64(type));
}

}
43
src/Common/FieldBinaryEncoding.h
Normal file
43
src/Common/FieldBinaryEncoding.h
Normal file
@ -0,0 +1,43 @@

#pragma once

#include <Core/Field.h>

namespace DB
{

/**
Binary encoding for Fields:
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| Field type | Binary encoding |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
| `Null` | `0x00` |
| `UInt64` | `0x01<var_uint_value>` |
| `Int64` | `0x02<var_int_value>` |
| `UInt128` | `0x03<uint128_little_endian_value>` |
| `Int128` | `0x04<int128_little_endian_value>` |
| `UInt256` | `0x05<uint256_little_endian_value>` |
| `Int256` | `0x06<int256_little_endian_value>` |
| `Float64` | `0x07<float64_little_endian_value>` |
| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
| `Bool` | `0x13<bool_value>` |
| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
| `Negative infinity` | `0xFE` |
| `Positive infinity` | `0xFF` |
|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
*/

void encodeField(const Field &, WriteBuffer & buf);
Field decodeField(ReadBuffer & buf);

}
@ -7,19 +7,20 @@ namespace DB

/// Kind of a temporal interval.
struct IntervalKind
{
/// Note: the order and numbers are important and are used in binary encoding; append new interval kinds to the end of the list.
enum class Kind : uint8_t
{
Nanosecond,
Microsecond,
Millisecond,
Second,
Minute,
Hour,
Day,
Week,
Month,
Quarter,
Year,
Nanosecond = 0x00,
Microsecond = 0x01,
Millisecond = 0x02,
Second = 0x03,
Minute = 0x04,
Hour = 0x05,
Day = 0x06,
Week = 0x07,
Month = 0x08,
Quarter = 0x09,
Year = 0x0A,
};
Kind kind = Kind::Second;

@ -568,6 +568,7 @@ The server successfully detected this situation and will download merged part fr

M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \
M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \
M(AggregationOptimizedEqualRangesOfKeys, "For how many blocks optimization of equal ranges of keys was applied") \
M(HashJoinPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for hash join.") \
\
M(MetadataFromKeeperCacheHit, "Number of times an object storage metadata request was answered from cache without making request to Keeper") \
M(MetadataFromKeeperCacheMiss, "Number of times an object storage metadata request had to be answered from Keeper") \
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <atomic>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <mutex>
|
||||
|
@ -31,8 +31,10 @@ target_link_libraries (arena_with_free_lists PRIVATE dbms)
|
||||
clickhouse_add_executable (lru_hash_map_perf lru_hash_map_perf.cpp)
|
||||
target_link_libraries (lru_hash_map_perf PRIVATE dbms)
|
||||
|
||||
clickhouse_add_executable (thread_creation_latency thread_creation_latency.cpp)
|
||||
target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io)
|
||||
if (OS_LINUX)
|
||||
clickhouse_add_executable (thread_creation_latency thread_creation_latency.cpp)
|
||||
target_link_libraries (thread_creation_latency PRIVATE clickhouse_common_io)
|
||||
endif()
|
||||
|
||||
clickhouse_add_executable (array_cache array_cache.cpp)
|
||||
target_link_libraries (array_cache PRIVATE clickhouse_common_io)
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Common/ConcurrentBoundedQueue.h>
|
||||
|
||||
#include <map>
|
||||
#include <variant>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <future>
|
||||
|
@ -2,9 +2,11 @@
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <boost/noncopyable.hpp>
|
||||
|
||||
namespace DB
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#include <base/hex.h>
|
||||
#include "Common/ZooKeeper/IKeeper.h"
|
||||
#include "Common/ZooKeeper/ZooKeeperCommon.h"
|
||||
#include <Common/setThreadName.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <Common/checkStackSize.h>
|
||||
@ -320,7 +321,7 @@ void KeeperDispatcher::responseThread()
|
||||
|
||||
try
|
||||
{
|
||||
setResponse(response_for_session.session_id, response_for_session.response);
|
||||
setResponse(response_for_session.session_id, response_for_session.response, response_for_session.request);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
@ -355,7 +356,7 @@ void KeeperDispatcher::snapshotThread()
|
||||
}
|
||||
}
|
||||
|
||||
void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
|
||||
void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)
|
||||
{
|
||||
std::lock_guard lock(session_to_response_callback_mutex);
|
||||
|
||||
@ -369,7 +370,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
|
||||
return;
|
||||
|
||||
auto callback = new_session_id_response_callback[session_id_resp.internal_id];
|
||||
callback(response);
|
||||
callback(response, request);
|
||||
new_session_id_response_callback.erase(session_id_resp.internal_id);
|
||||
}
|
||||
else /// Normal response, just write to client
|
||||
@ -380,7 +381,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
|
||||
if (session_response_callback == session_to_response_callback.end())
|
||||
return;
|
||||
|
||||
session_response_callback->second(response);
|
||||
session_response_callback->second(response, request);
|
||||
|
||||
/// Session closed, no more writes
|
||||
if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
|
||||
@ -771,21 +772,27 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
|
||||
|
||||
{
|
||||
std::lock_guard lock(session_to_response_callback_mutex);
|
||||
new_session_id_response_callback[request->internal_id] = [promise, internal_id = request->internal_id] (const Coordination::ZooKeeperResponsePtr & response)
|
||||
new_session_id_response_callback[request->internal_id]
|
||||
= [promise, internal_id = request->internal_id](
|
||||
const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr /*request*/)
|
||||
{
|
||||
if (response->getOpNum() != Coordination::OpNum::SessionID)
|
||||
promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Incorrect response of type {} instead of SessionID response", response->getOpNum())));
|
||||
promise->set_exception(std::make_exception_ptr(Exception(
|
||||
ErrorCodes::LOGICAL_ERROR, "Incorrect response of type {} instead of SessionID response", response->getOpNum())));
|
||||
|
||||
auto session_id_response = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
|
||||
if (session_id_response.internal_id != internal_id)
|
||||
{
|
||||
promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
|
||||
"Incorrect response with internal id {} instead of {}", session_id_response.internal_id, internal_id)));
|
||||
promise->set_exception(std::make_exception_ptr(Exception(
|
||||
ErrorCodes::LOGICAL_ERROR,
|
||||
"Incorrect response with internal id {} instead of {}",
|
||||
session_id_response.internal_id,
|
||||
internal_id)));
|
||||
}
|
||||
|
||||
if (response->error != Coordination::Error::ZOK)
|
||||
promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
|
||||
promise->set_exception(
|
||||
std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
|
||||
|
||||
promise->set_value(session_id_response.session_id);
|
||||
};
|
||||
|
@ -20,7 +20,7 @@
|
||||
|
||||
namespace DB
|
||||
{
|
||||
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response)>;
|
||||
using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)>;
|
||||
|
||||
/// Highlevel wrapper for ClickHouse Keeper.
|
||||
/// Process user requests via consensus and return responses.
|
||||
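A handler matching the widened callback signature now receives the originating request alongside the response. A hypothetical example (editorial, not part of the commit; the setResponse declaration below defaults the request to nullptr, so handlers must tolerate a missing request):

DB::ZooKeeperResponseCallback callback
    = [](const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)
{
    if (request && response->error == Coordination::Error::ZOK)
    {
        /// The request is available here, e.g. to correlate it with the
        /// response by xid or to inspect request->getOpNum().
    }
};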
@ -92,7 +92,7 @@ private:
|
||||
void clusterUpdateWithReconfigDisabledThread();
|
||||
void clusterUpdateThread();
|
||||
|
||||
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
|
||||
void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request = nullptr);
|
||||
|
||||
/// Add error responses for requests to responses queue.
|
||||
/// Clears requests.
|
||||
|
@ -407,7 +407,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
|
||||
if (!keeper_context->localLogsPreprocessed() && !preprocess(*request_for_session))
|
||||
return nullptr;
|
||||
|
||||
auto try_push = [&](const KeeperStorage::ResponseForSession& response)
|
||||
auto try_push = [&](const KeeperStorage::ResponseForSession & response)
|
||||
{
|
||||
if (!responses_queue.push(response))
|
||||
{
|
||||
@ -416,17 +416,6 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
|
||||
"Failed to push response with session id {} to the queue, probably because of shutdown",
|
||||
response.session_id);
|
||||
}
|
||||
|
||||
using namespace std::chrono;
|
||||
uint64_t elapsed = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count() - request_for_session->time;
|
||||
if (elapsed > keeper_context->getCoordinationSettings()->log_slow_total_threshold_ms)
|
||||
{
|
||||
LOG_INFO(
|
||||
log,
|
||||
"Total time to process a request took too long ({}ms).\nRequest info: {}",
|
||||
elapsed,
|
||||
request_for_session->request->toString(/*short_format=*/true));
|
||||
}
|
||||
};
|
||||
|
||||
try
|
||||
@ -443,6 +432,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
|
||||
KeeperStorage::ResponseForSession response_for_session;
|
||||
response_for_session.session_id = -1;
|
||||
response_for_session.response = response;
|
||||
response_for_session.request = request_for_session->request;
|
||||
|
||||
LockGuardWithStats lock(storage_and_responses_lock);
|
||||
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
|
||||
@ -462,8 +452,14 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
|
||||
LockGuardWithStats lock(storage_and_responses_lock);
|
||||
KeeperStorage::ResponsesForSessions responses_for_sessions
|
||||
= storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
|
||||
|
||||
for (auto & response_for_session : responses_for_sessions)
|
||||
{
|
||||
if (response_for_session.response->xid != Coordination::WATCH_XID)
|
||||
response_for_session.request = request_for_session->request;
|
||||
|
||||
try_push(response_for_session);
|
||||
}
|
||||
|
||||
if (keeper_context->digestEnabled() && request_for_session->digest)
|
||||
assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, request_for_session->log_idx, true);
|
||||
@ -797,9 +793,14 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
|
||||
LockGuardWithStats lock(storage_and_responses_lock);
|
||||
auto responses = storage->processRequest(
|
||||
request_for_session.request, request_for_session.session_id, std::nullopt, true /*check_acl*/, true /*is_local*/);
|
||||
for (const auto & response : responses)
|
||||
if (!responses_queue.push(response))
|
||||
LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response.session_id);
|
||||
|
||||
for (auto & response_for_session : responses)
|
||||
{
|
||||
if (response_for_session.response->xid != Coordination::WATCH_XID)
|
||||
response_for_session.request = request_for_session.request;
|
||||
if (!responses_queue.push(response_for_session))
|
||||
LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response_for_session.session_id);
|
||||
}
|
||||
}
|
||||
|
||||
void KeeperStateMachine::shutdownStorage()
|
||||
|
@ -206,6 +206,7 @@ public:
|
||||
{
|
||||
int64_t session_id;
|
||||
Coordination::ZooKeeperResponsePtr response;
|
||||
Coordination::ZooKeeperRequestPtr request = nullptr;
|
||||
};
|
||||
using ResponsesForSessions = std::vector<ResponseForSession>;
|
||||
|
||||
|
@ -27,7 +27,8 @@ PoolWithFailover::PoolWithFailover(
|
||||
size_t pool_size,
|
||||
size_t pool_wait_timeout_,
|
||||
size_t max_tries_,
|
||||
bool auto_close_connection_)
|
||||
bool auto_close_connection_,
|
||||
size_t connection_attempt_timeout_)
|
||||
: pool_wait_timeout(pool_wait_timeout_)
|
||||
, max_tries(max_tries_)
|
||||
, auto_close_connection(auto_close_connection_)
|
||||
@ -39,8 +40,13 @@ PoolWithFailover::PoolWithFailover(
|
||||
{
|
||||
for (const auto & replica_configuration : configurations)
|
||||
{
|
||||
auto connection_info = formatConnectionString(replica_configuration.database,
|
||||
replica_configuration.host, replica_configuration.port, replica_configuration.username, replica_configuration.password);
|
||||
auto connection_info = formatConnectionString(
|
||||
replica_configuration.database,
|
||||
replica_configuration.host,
|
||||
replica_configuration.port,
|
||||
replica_configuration.username,
|
||||
replica_configuration.password,
|
||||
connection_attempt_timeout_);
|
||||
replicas_with_priority[priority].emplace_back(connection_info, pool_size);
|
||||
}
|
||||
}
|
||||
@ -51,7 +57,8 @@ PoolWithFailover::PoolWithFailover(
|
||||
size_t pool_size,
|
||||
size_t pool_wait_timeout_,
|
||||
size_t max_tries_,
|
||||
bool auto_close_connection_)
|
||||
bool auto_close_connection_,
|
||||
size_t connection_attempt_timeout_)
|
||||
: pool_wait_timeout(pool_wait_timeout_)
|
||||
, max_tries(max_tries_)
|
||||
, auto_close_connection(auto_close_connection_)
|
||||
@ -63,7 +70,13 @@ PoolWithFailover::PoolWithFailover(
|
||||
for (const auto & [host, port] : configuration.addresses)
|
||||
{
|
||||
LOG_DEBUG(getLogger("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port);
|
||||
auto connection_string = formatConnectionString(configuration.database, host, port, configuration.username, configuration.password);
|
||||
auto connection_string = formatConnectionString(
|
||||
configuration.database,
|
||||
host,
|
||||
port,
|
||||
configuration.username,
|
||||
configuration.password,
|
||||
connection_attempt_timeout_);
|
||||
replicas_with_priority[0].emplace_back(connection_string, pool_size);
|
||||
}
|
||||
}
|
||||
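With the new trailing parameter, a call site constructs the pool as follows (editorial sketch with hypothetical values; the named constants are the defaults from the header just below, and a timeout of 2 seconds matches the default of the new postgresql_connection_attempt_timeout setting further down in this diff):

auto pool = std::make_shared<postgres::PoolWithFailover>(
    configuration,                                       /// assumed: a filled-in DB::StoragePostgreSQL::Configuration
    POSTGRESQL_POOL_DEFAULT_SIZE,
    POSTGRESQL_POOL_WAIT_TIMEOUT,
    POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
    /*auto_close_connection_=*/ false,
    /*connection_attempt_timeout_=*/ 2);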
|
@ -14,7 +14,6 @@
|
||||
|
||||
static constexpr inline auto POSTGRESQL_POOL_DEFAULT_SIZE = 16;
|
||||
static constexpr inline auto POSTGRESQL_POOL_WAIT_TIMEOUT = 5000;
|
||||
static constexpr inline auto POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES = 2;
|
||||
|
||||
namespace postgres
|
||||
{
|
||||
@ -30,14 +29,16 @@ public:
|
||||
size_t pool_size,
|
||||
size_t pool_wait_timeout,
|
||||
size_t max_tries_,
|
||||
bool auto_close_connection_);
|
||||
bool auto_close_connection_,
|
||||
size_t connection_attempt_timeout_);
|
||||
|
||||
explicit PoolWithFailover(
|
||||
const DB::StoragePostgreSQL::Configuration & configuration,
|
||||
size_t pool_size,
|
||||
size_t pool_wait_timeout,
|
||||
size_t max_tries_,
|
||||
bool auto_close_connection_);
|
||||
bool auto_close_connection_,
|
||||
size_t connection_attempt_timeout_);
|
||||
|
||||
PoolWithFailover(const PoolWithFailover & other) = delete;
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
namespace postgres
|
||||
{
|
||||
|
||||
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password)
|
||||
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout)
|
||||
{
|
||||
DB::WriteBufferFromOwnString out;
|
||||
out << "dbname=" << DB::quote << dbname
|
||||
@ -16,7 +16,7 @@ ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, S
|
||||
<< " port=" << port
|
||||
<< " user=" << DB::quote << user
|
||||
<< " password=" << DB::quote << password
|
||||
<< " connect_timeout=2";
|
||||
<< " connect_timeout=" << timeout;
|
||||
return {out.str(), host + ':' + DB::toString(port)};
|
||||
}
|
||||
|
||||
|
@ -18,7 +18,7 @@ namespace pqxx
|
||||
namespace postgres
|
||||
{
|
||||
|
||||
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password);
|
||||
ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout);
|
||||
|
||||
String getConnectionForLog(const String & host, UInt16 port);
|
||||
|
||||
|
@ -63,7 +63,7 @@ const char USER_INTERSERVER_MARKER[] = " INTERSERVER SECRET ";
|
||||
/// Marker for SSH-keys-based authentication (passed as the user name)
|
||||
const char SSH_KEY_AUTHENTICAION_MARKER[] = " SSH KEY AUTHENTICATION ";
|
||||
|
||||
/// Market for JSON Web Token authentication
|
||||
/// Marker for JSON Web Token authentication
|
||||
const char JWT_AUTHENTICAION_MARKER[] = " JWT AUTHENTICATION ";
|
||||
|
||||
};
|
||||
|
@ -151,6 +151,7 @@ namespace DB
|
||||
M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||
M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||
M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
|
||||
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
|
||||
M(String, merge_workload, "default", "Name of workload to be used to access resources for all merges (may be overridden by a merge tree setting)", 0) \
|
||||
M(String, mutation_workload, "default", "Name of workload to be used to access resources for all mutations (may be overridden by a merge tree setting)", 0) \
|
||||
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
|
||||
|
@ -530,6 +530,7 @@ class IColumn;
|
||||
M(Bool, optimize_read_in_order, true, "Enable ORDER BY optimization for reading data in corresponding order in MergeTree tables.", 0) \
|
||||
M(Bool, optimize_read_in_window_order, true, "Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.", 0) \
|
||||
M(Bool, optimize_aggregation_in_order, false, "Enable GROUP BY optimization for aggregating data in corresponding order in MergeTree tables.", 0) \
|
||||
M(Bool, read_in_order_use_buffering, true, "Use buffering before merging while reading in order of primary key. It increases the parallelism of query execution", 0) \
|
||||
M(UInt64, aggregation_in_order_max_block_bytes, 50000000, "Maximal size of block in bytes accumulated during aggregation in order of primary key. Lower block size allows to parallelize more final merge stage of aggregation.", 0) \
|
||||
M(UInt64, read_in_order_two_level_merge_threshold, 100, "Minimal number of parts to read to run preliminary merge step during multithread reading in order of primary key.", 0) \
|
||||
M(Bool, low_cardinality_allow_in_native_format, true, "Use LowCardinality type in Native format. Otherwise, convert LowCardinality columns to ordinary for select query, and convert ordinary columns to required LowCardinality for insert query.", 0) \
|
||||
@ -560,7 +561,9 @@ class IColumn;
|
||||
M(UInt64, max_partition_size_to_drop, 50000000000lu, "Same as max_table_size_to_drop, but for the partitions.", 0) \
|
||||
\
|
||||
M(UInt64, postgresql_connection_pool_size, 16, "Connection pool size for PostgreSQL table engine and database engine.", 0) \
|
||||
M(UInt64, postgresql_connection_attempt_timeout, 2, "Connection timeout to PostgreSQL table engine and database engine in seconds.", 0) \
|
||||
M(UInt64, postgresql_connection_pool_wait_timeout, 5000, "Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.", 0) \
|
||||
M(UInt64, postgresql_connection_pool_retries, 2, "Connection pool push/pop retries number for PostgreSQL table engine and database engine.", 0) \
|
||||
M(Bool, postgresql_connection_pool_auto_close_connection, false, "Close connection before returning connection to the pool.", 0) \
|
||||
M(UInt64, glob_expansion_max_elements, 1000, "Maximum number of allowed addresses (For external storages, table functions, etc).", 0) \
|
||||
M(UInt64, odbc_bridge_connection_pool_size, 16, "Connection pool size for each connection settings string in ODBC bridge.", 0) \
|
||||
@ -677,9 +680,11 @@ class IColumn;
|
||||
M(UInt64, insert_shard_id, 0, "If non zero, when insert into a distributed table, the data will be inserted into the shard `insert_shard_id` synchronously. Possible values range from 1 to `shards_number` of corresponding distributed table", 0) \
|
||||
\
|
||||
M(Bool, collect_hash_table_stats_during_aggregation, true, "Enable collecting hash table statistics to optimize memory allocation", 0) \
|
||||
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
|
||||
M(UInt64, max_size_to_preallocate_for_aggregation, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \
|
||||
\
|
||||
M(Bool, collect_hash_table_stats_during_joins, true, "Enable collecting hash table statistics to optimize memory allocation", 0) \
|
||||
M(UInt64, max_size_to_preallocate_for_joins, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before join", 0) \
|
||||
\
|
||||
M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \
|
||||
M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \
|
||||
M(Bool, allow_aggregate_partitions_independently, false, "Enable independent aggregation of partitions on separate threads when partition key suits group by key. Beneficial when number of partitions close to number of cores and partitions have roughly the same size", 0) \
|
||||
@ -718,6 +723,7 @@ class IColumn;
|
||||
M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
|
||||
M(Bool, optimize_group_by_constant_keys, true, "Optimize GROUP BY when all keys in block are constant", 0) \
|
||||
M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \
|
||||
M(Bool, enable_named_columns_in_function_tuple, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers.", 0) \
|
||||
\
|
||||
M(Bool, query_plan_enable_optimizations, true, "Globally enable/disable query optimization at the query plan level", 0) \
|
||||
M(UInt64, query_plan_max_optimizations_to_apply, 10000, "Limit the total number of optimizations applied to query plan. If zero, ignored. If limit reached, throw exception", 0) \
|
||||
@ -1009,6 +1015,7 @@ class IColumn;
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, async_insert_threads, 16) \
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_replicated_fetches_network_bandwidth_for_server, 0) \
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_replicated_sends_network_bandwidth_for_server, 0) \
|
||||
MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, max_entries_for_hash_table_stats, 10'000) \
|
||||
/* ---- */ \
|
||||
MAKE_OBSOLETE(M, DefaultDatabaseEngine, default_database_engine, DefaultDatabaseEngine::Atomic) \
|
||||
MAKE_OBSOLETE(M, UInt64, max_pipeline_depth, 0) \
|
||||
@ -1063,6 +1070,7 @@ class IColumn;
|
||||
M(Bool, input_format_orc_allow_missing_columns, true, "Allow missing columns while reading ORC input formats", 0) \
|
||||
M(Bool, input_format_orc_use_fast_decoder, true, "Use a faster ORC decoder implementation.", 0) \
|
||||
M(Bool, input_format_orc_filter_push_down, true, "When reading ORC files, skip whole stripes or row groups based on the WHERE/PREWHERE expressions, min/max statistics or bloom filter in the ORC metadata.", 0) \
|
||||
M(Bool, input_format_orc_read_use_writer_time_zone, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT.", 0) \
|
||||
M(Bool, input_format_parquet_allow_missing_columns, true, "Allow missing columns while reading Parquet input formats", 0) \
|
||||
M(UInt64, input_format_parquet_local_file_min_bytes_for_seek, 8192, "Min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format", 0) \
|
||||
M(Bool, input_format_arrow_allow_missing_columns, true, "Allow missing columns while reading Arrow input formats", 0) \
|
||||
@ -1130,6 +1138,8 @@ class IColumn;
|
||||
M(Bool, input_format_tsv_crlf_end_of_line, false, "If it is set true, file function will read TSV format with \\r\\n instead of \\n.", 0) \
|
||||
\
|
||||
M(Bool, input_format_native_allow_types_conversion, true, "Allow data types conversion in Native input format", 0) \
|
||||
M(Bool, input_format_native_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in Native input format", 0) \
|
||||
M(Bool, output_format_native_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in Native output format", 0) \
|
||||
\
|
||||
M(DateTimeInputFormat, date_time_input_format, FormatSettings::DateTimeInputFormat::Basic, "Method to read DateTime from text input formats. Possible values: 'basic', 'best_effort' and 'best_effort_us'.", 0) \
|
||||
M(DateTimeOutputFormat, date_time_output_format, FormatSettings::DateTimeOutputFormat::Simple, "Method to write DateTime to text output. Possible values: 'simple', 'iso', 'unix_timestamp'.", 0) \
|
||||
@ -1149,6 +1159,8 @@ class IColumn;
|
||||
M(Bool, input_format_avro_null_as_default, false, "For Avro/AvroConfluent format: insert default in case of null and non Nullable column", 0) \
|
||||
M(UInt64, format_binary_max_string_size, 1_GiB, "The maximum allowed size for String in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
|
||||
M(UInt64, format_binary_max_array_size, 1_GiB, "The maximum allowed size for Array in RowBinary format. It prevents allocating large amount of memory in case of corrupted data. 0 means there is no limit", 0) \
|
||||
M(Bool, input_format_binary_decode_types_in_binary_format, false, "Read data types in binary format instead of type names in RowBinaryWithNamesAndTypes input format", 0) \
|
||||
M(Bool, output_format_binary_encode_types_in_binary_format, false, "Write data types in binary format instead of type names in RowBinaryWithNamesAndTypes output format ", 0) \
|
||||
M(URI, format_avro_schema_registry_url, "", "For AvroConfluent format: Confluent Schema Registry URL.", 0) \
|
||||
\
|
||||
M(Bool, output_format_json_quote_64bit_integers, true, "Controls quoting of 64-bit integers in JSON output format.", 0) \
|
||||
@ -1169,9 +1181,9 @@ class IColumn;
|
||||
M(UInt64, output_format_pretty_max_value_width, 10000, "Maximum width of value to display in Pretty formats. If greater - it will be cut.", 0) \
|
||||
M(UInt64, output_format_pretty_max_value_width_apply_for_single_value, false, "Only cut values (see the `output_format_pretty_max_value_width` setting) when it is not a single value in a block. Otherwise output it entirely, which is useful for the `SHOW CREATE TABLE` query.", 0) \
|
||||
M(UInt64Auto, output_format_pretty_color, "auto", "Use ANSI escape sequences in Pretty formats. 0 - disabled, 1 - enabled, 'auto' - enabled if a terminal.", 0) \
|
||||
M(String, output_format_pretty_grid_charset, "UTF-8", "Charset for printing grid borders. Available charsets: ASCII, UTF-8 (default one).", 0) \
|
||||
M(UInt64, output_format_pretty_display_footer_column_names, true, "Display column names in the footer if there are 999 or more rows.", 0) \
|
||||
M(UInt64, output_format_pretty_display_footer_column_names_min_rows, 50, "Sets the minimum threshold value of rows for which to enable displaying column names in the footer. 50 (default)", 0) \
|
||||
M(String, output_format_pretty_grid_charset, "UTF-8", "Charset for printing grid borders. Available charsets: ASCII, UTF-8 (default one).", 0) \
|
||||
M(UInt64, output_format_pretty_display_footer_column_names, true, "Display column names in the footer if there are 999 or more rows.", 0) \
|
||||
M(UInt64, output_format_pretty_display_footer_column_names_min_rows, 50, "Sets the minimum threshold value of rows for which to enable displaying column names in the footer. 50 (default)", 0) \
|
||||
M(UInt64, output_format_parquet_row_group_size, 1000000, "Target row group size in rows.", 0) \
|
||||
M(UInt64, output_format_parquet_row_group_size_bytes, 512 * 1024 * 1024, "Target row group size in bytes, before compression.", 0) \
|
||||
M(Bool, output_format_parquet_string_as_string, true, "Use Parquet String type instead of Binary for String columns.", 0) \
|
||||
|
@ -58,15 +58,26 @@ String ClickHouseVersion::toString() const
|
||||
static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory::SettingsChanges>> settings_changes_history_initializer =
|
||||
{
|
||||
{"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."},
|
||||
{"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"},
|
||||
{"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"},
|
||||
{"output_format_native_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in Native output format"},
|
||||
{"input_format_native_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in Native output format"},
|
||||
{"read_in_order_use_buffering", false, true, "Use buffering before merging while reading in order of primary key"},
|
||||
{"optimize_functions_to_subcolumns", false, true, "Enable optimization by default"},
|
||||
{"enable_named_columns_in_function_tuple", false, true, "Generate named tuples in function tuple() when all names are unique and can be treated as unquoted identifiers."},
|
||||
{"input_format_json_ignore_key_case", false, false, "Ignore json key case while read json field from string."},
|
||||
{"optimize_trivial_insert_select", true, false, "The optimization does not make sense in many cases."},
|
||||
{"collect_hash_table_stats_during_joins", false, true, "New setting."},
|
||||
{"max_size_to_preallocate_for_joins", 0, 100'000'000, "New setting."},
|
||||
{"input_format_orc_read_use_writer_time_zone", false, false, "Whether use the writer's time zone in ORC stripe for ORC row reader, the default ORC row reader's time zone is GMT."},
|
||||
{"lightweight_mutation_projection_mode", "throw", "throw", "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete."},
|
||||
{"database_replicated_allow_heavy_create", true, false, "Long-running DDL queries (CREATE AS SELECT and POPULATE) for Replicated database engine was forbidden"},
|
||||
{"query_plan_merge_filters", false, false, "Allow to merge filters in the query plan"},
|
||||
{"azure_sdk_max_retries", 10, 10, "Maximum number of retries in azure sdk"},
|
||||
{"azure_sdk_retry_initial_backoff_ms", 10, 10, "Minimal backoff between retries in azure sdk"},
|
||||
{"azure_sdk_retry_max_backoff_ms", 1000, 1000, "Maximal backoff between retries in azure sdk"},
|
||||
{"postgresql_connection_attempt_timeout", 2, 2, "Allow to control 'connect_timeout' parameter of PostgreSQL connection."},
|
||||
{"postgresql_connection_pool_retries", 2, 2, "Allow to control the number of retries in PostgreSQL connection pool."},
|
||||
{"use_hive_partitioning", false, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines."},
|
||||
}},
|
||||
{"24.6", {{"materialize_skip_indexes_on_insert", true, true, "Added new setting to allow to disable materialization of skip indexes on insert"},
|
||||
|
65
src/Core/tests/gtest_fields_binary_enciding.cpp
Normal file
@ -0,0 +1,65 @@
|
||||
#include <gtest/gtest.h>
|
||||
#include <Common/FieldBinaryEncoding.h>
|
||||
#include <IO/WriteBufferFromString.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
|
||||
using namespace DB;
|
||||
|
||||
namespace DB::ErrorCodes
|
||||
{
|
||||
extern const int UNSUPPORTED_METHOD;
|
||||
}
|
||||
|
||||
|
||||
void check(const Field & field)
|
||||
{
|
||||
// std::cerr << "Check " << toString(field) << "\n";
|
||||
WriteBufferFromOwnString ostr;
|
||||
encodeField(field, ostr);
|
||||
ReadBufferFromString istr(ostr.str());
|
||||
Field decoded_field = decodeField(istr);
|
||||
ASSERT_TRUE(istr.eof());
|
||||
ASSERT_EQ(field, decoded_field);
|
||||
}
|
||||
|
||||
GTEST_TEST(FieldBinaryEncoding, EncodeAndDecode)
|
||||
{
|
||||
check(Null());
|
||||
check(POSITIVE_INFINITY);
|
||||
check(NEGATIVE_INFINITY);
|
||||
check(true);
|
||||
check(UInt64(42));
|
||||
check(Int64(-42));
|
||||
check(UInt128(42));
|
||||
check(Int128(-42));
|
||||
check(UInt256(42));
|
||||
check(Int256(-42));
|
||||
check(UUID(42));
|
||||
check(IPv4(42));
|
||||
check(IPv6(42));
|
||||
check(Float64(42.42));
|
||||
check(String("Hello, World!"));
|
||||
check(Array({Field(UInt64(42)), Field(UInt64(43))}));
|
||||
check(Tuple({Field(UInt64(42)), Field(Null()), Field(UUID(42)), Field(String("Hello, World!"))}));
|
||||
check(Map({Tuple{Field(UInt64(42)), Field(String("str_42"))}, Tuple{Field(UInt64(43)), Field(String("str_43"))}}));
|
||||
check(Object({{String("key_1"), Field(UInt64(42))}, {String("key_2"), Field(UInt64(43))}}));
|
||||
check(DecimalField<Decimal32>(4242, 3));
|
||||
check(DecimalField<Decimal64>(4242, 3));
|
||||
check(DecimalField<Decimal128>(Int128(4242), 3));
|
||||
check(DecimalField<Decimal256>(Int256(4242), 3));
|
||||
check(AggregateFunctionStateData{.name="some_name", .data="some_data"});
|
||||
try
|
||||
{
|
||||
check(CustomType());
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
ASSERT_EQ(e.code(), ErrorCodes::UNSUPPORTED_METHOD);
|
||||
}
|
||||
|
||||
check(Array({
|
||||
Tuple({Field(UInt64(42)), Map({Tuple{Field(UInt64(42)), Field(String("str_42"))}, Tuple{Field(UInt64(43)), Field(String("str_43"))}}), Field(UUID(42)), Field(String("Hello, World!"))}),
|
||||
Tuple({Field(UInt64(43)), Map({Tuple{Field(UInt64(43)), Field(String("str_43"))}, Tuple{Field(UInt64(44)), Field(String("str_44"))}}), Field(UUID(43)), Field(String("Hello, World 2!"))})
|
||||
}));
|
||||
}
|
||||
|
@ -25,7 +25,6 @@ private:
|
||||
mutable std::optional<size_t> version;
|
||||
|
||||
String getNameImpl(bool with_version) const;
|
||||
size_t getVersion() const;
|
||||
|
||||
public:
|
||||
static constexpr bool is_parametric = true;
|
||||
@ -39,6 +38,8 @@ public:
|
||||
{
|
||||
}
|
||||
|
||||
size_t getVersion() const;
|
||||
|
||||
String getFunctionName() const;
|
||||
AggregateFunctionPtr getFunction() const { return function; }
|
||||
|
||||
|
@ -165,6 +165,19 @@ static std::pair<DataTypePtr, DataTypeCustomDescPtr> create(const ASTPtr & argum
|
||||
return std::make_pair(storage_type, std::make_unique<DataTypeCustomDesc>(std::move(custom_name), nullptr));
|
||||
}
|
||||
|
||||
String DataTypeCustomSimpleAggregateFunction::getFunctionName() const
|
||||
{
|
||||
return function->getName();
|
||||
}
|
||||
|
||||
DataTypePtr createSimpleAggregateFunctionType(const AggregateFunctionPtr & function, const DataTypes & argument_types, const Array & parameters)
|
||||
{
|
||||
auto custom_desc = std::make_unique<DataTypeCustomDesc>(
|
||||
std::make_unique<DataTypeCustomSimpleAggregateFunction>(function, argument_types, parameters));
|
||||
|
||||
return DataTypeFactory::instance().getCustom(std::move(custom_desc));
|
||||
}
|
||||
|
||||
void registerDataTypeDomainSimpleAggregateFunction(DataTypeFactory & factory)
|
||||
{
|
||||
factory.registerDataTypeCustom("SimpleAggregateFunction", create);
|
||||
|
@ -40,8 +40,13 @@ public:
|
||||
: function(function_), argument_types(argument_types_), parameters(parameters_) {}
|
||||
|
||||
AggregateFunctionPtr getFunction() const { return function; }
|
||||
String getFunctionName() const;
|
||||
const DataTypes & getArgumentsDataTypes() const { return argument_types; }
|
||||
const Array & getParameters() const { return parameters; }
|
||||
String getName() const override;
|
||||
static void checkSupportedFunctions(const AggregateFunctionPtr & function);
|
||||
};
|
||||
|
||||
DataTypePtr createSimpleAggregateFunctionType(const AggregateFunctionPtr & function, const DataTypes & argument_types, const Array & parameters);
|
||||
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ static DataTypePtr create(const ASTPtr & arguments)
|
||||
|
||||
auto * literal = argument->arguments->children[1]->as<ASTLiteral>();
|
||||
|
||||
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get<UInt64>() == 0 || literal->value.get<UInt64>() > 255)
|
||||
if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get<UInt64>() == 0 || literal->value.get<UInt64>() > ColumnVariant::MAX_NESTED_COLUMNS)
|
||||
throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 1 and 255");
|
||||
|
||||
return std::make_shared<DataTypeDynamic>(literal->value.get<UInt64>());
|
||||
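The hard-coded 255 is replaced above by the named limit; the two coincide today, which a compile-time check can document (editorial sketch, assuming ColumnVariant::MAX_NESTED_COLUMNS is 255 as in the current headers):

#include <Columns/ColumnVariant.h>
static_assert(DB::ColumnVariant::MAX_NESTED_COLUMNS == 255,
              "the 1..255 wording in the error message above relies on this");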
|
@ -19,6 +19,8 @@ public:
|
||||
}
|
||||
|
||||
String getName() const override;
|
||||
const DataTypes & getElements() const { return elems; }
|
||||
const Names & getNames() const { return names; }
|
||||
};
|
||||
|
||||
DataTypePtr createNested(const DataTypes & types, const Names & names);
|
||||
|
705
src/DataTypes/DataTypesBinaryEncoding.cpp
Normal file
@ -0,0 +1,705 @@
|
||||
#include <DataTypes/DataTypesBinaryEncoding.h>
|
||||
#include <DataTypes/DataTypeDateTime64.h>
|
||||
#include <DataTypes/DataTypeFixedString.h>
|
||||
#include <DataTypes/DataTypeEnum.h>
|
||||
#include <DataTypes/DataTypesDecimal.h>
|
||||
#include <DataTypes/DataTypeArray.h>
|
||||
#include <DataTypes/DataTypeTuple.h>
|
||||
#include <DataTypes/DataTypeNullable.h>
|
||||
#include <DataTypes/DataTypeFunction.h>
|
||||
#include <DataTypes/DataTypeLowCardinality.h>
|
||||
#include <DataTypes/DataTypeMap.h>
|
||||
#include <DataTypes/DataTypeVariant.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <DataTypes/DataTypeUUID.h>
|
||||
#include <DataTypes/DataTypeSet.h>
|
||||
#include <DataTypes/DataTypeInterval.h>
|
||||
#include <DataTypes/DataTypeIPv4andIPv6.h>
|
||||
#include <DataTypes/DataTypeAggregateFunction.h>
|
||||
#include <DataTypes/DataTypeCustomSimpleAggregateFunction.h>
|
||||
#include <DataTypes/DataTypeNothing.h>
|
||||
#include <DataTypes/DataTypeDynamic.h>
|
||||
#include <DataTypes/DataTypeNested.h>
|
||||
#include <DataTypes/DataTypeFactory.h>
|
||||
#include <AggregateFunctions/IAggregateFunction.h>
|
||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||
#include <Parsers/NullsAction.h>
|
||||
#include <IO/WriteBuffer.h>
|
||||
#include <IO/ReadBuffer.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <Common/FieldBinaryEncoding.h>
|
||||
#include <Common/assert_cast.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNSUPPORTED_METHOD;
|
||||
extern const int INCORRECT_DATA;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
enum class BinaryTypeIndex : uint8_t
|
||||
{
|
||||
Nothing = 0x00,
|
||||
UInt8 = 0x01,
|
||||
UInt16 = 0x02,
|
||||
UInt32 = 0x03,
|
||||
UInt64 = 0x04,
|
||||
UInt128 = 0x05,
|
||||
UInt256 = 0x06,
|
||||
Int8 = 0x07,
|
||||
Int16 = 0x08,
|
||||
Int32 = 0x09,
|
||||
Int64 = 0x0A,
|
||||
Int128 = 0x0B,
|
||||
Int256 = 0x0C,
|
||||
Float32 = 0x0D,
|
||||
Float64 = 0x0E,
|
||||
Date = 0x0F,
|
||||
Date32 = 0x10,
|
||||
DateTimeUTC = 0x11,
|
||||
DateTimeWithTimezone = 0x12,
|
||||
DateTime64UTC = 0x13,
|
||||
DateTime64WithTimezone = 0x14,
|
||||
String = 0x15,
|
||||
FixedString = 0x16,
|
||||
Enum8 = 0x17,
|
||||
Enum16 = 0x18,
|
||||
Decimal32 = 0x19,
|
||||
Decimal64 = 0x1A,
|
||||
Decimal128 = 0x1B,
|
||||
Decimal256 = 0x1C,
|
||||
UUID = 0x1D,
|
||||
Array = 0x1E,
|
||||
UnnamedTuple = 0x1F,
|
||||
NamedTuple = 0x20,
|
||||
Set = 0x21,
|
||||
Interval = 0x22,
|
||||
Nullable = 0x23,
|
||||
Function = 0x24,
|
||||
AggregateFunction = 0x25,
|
||||
LowCardinality = 0x26,
|
||||
Map = 0x27,
|
||||
IPv4 = 0x28,
|
||||
IPv6 = 0x29,
|
||||
Variant = 0x2A,
|
||||
Dynamic = 0x2B,
|
||||
Custom = 0x2C,
|
||||
Bool = 0x2D,
|
||||
SimpleAggregateFunction = 0x2E,
|
||||
Nested = 0x2F,
|
||||
};
|
||||
|
||||
BinaryTypeIndex getBinaryTypeIndex(const DataTypePtr & type)
|
||||
{
|
||||
/// By default custom types don't have their own BinaryTypeIndex.
|
||||
if (type->hasCustomName())
|
||||
{
|
||||
/// Some widely used custom types have separate BinaryTypeIndex for better serialization.
|
||||
/// Right now it's Bool, SimpleAggregateFunction and Nested types.
|
||||
/// TODO: Consider adding BinaryTypeIndex for more custom types.
|
||||
|
||||
if (isBool(type))
|
||||
return BinaryTypeIndex::Bool;
|
||||
|
||||
if (typeid_cast<const DataTypeCustomSimpleAggregateFunction *>(type->getCustomName()))
|
||||
return BinaryTypeIndex::SimpleAggregateFunction;
|
||||
|
||||
if (isNested(type))
|
||||
return BinaryTypeIndex::Nested;
|
||||
|
||||
return BinaryTypeIndex::Custom;
|
||||
}
|
||||
|
||||
switch (type->getTypeId())
|
||||
{
|
||||
case TypeIndex::Nothing:
|
||||
return BinaryTypeIndex::Nothing;
|
||||
case TypeIndex::UInt8:
|
||||
return BinaryTypeIndex::UInt8;
|
||||
case TypeIndex::UInt16:
|
||||
return BinaryTypeIndex::UInt16;
|
||||
case TypeIndex::UInt32:
|
||||
return BinaryTypeIndex::UInt32;
|
||||
case TypeIndex::UInt64:
|
||||
return BinaryTypeIndex::UInt64;
|
||||
case TypeIndex::UInt128:
|
||||
return BinaryTypeIndex::UInt128;
|
||||
case TypeIndex::UInt256:
|
||||
return BinaryTypeIndex::UInt256;
|
||||
case TypeIndex::Int8:
|
||||
return BinaryTypeIndex::Int8;
|
||||
case TypeIndex::Int16:
|
||||
return BinaryTypeIndex::Int16;
|
||||
case TypeIndex::Int32:
|
||||
return BinaryTypeIndex::Int32;
|
||||
case TypeIndex::Int64:
|
||||
return BinaryTypeIndex::Int64;
|
||||
case TypeIndex::Int128:
|
||||
return BinaryTypeIndex::Int128;
|
||||
case TypeIndex::Int256:
|
||||
return BinaryTypeIndex::Int256;
|
||||
case TypeIndex::Float32:
|
||||
return BinaryTypeIndex::Float32;
|
||||
case TypeIndex::Float64:
|
||||
return BinaryTypeIndex::Float64;
|
||||
case TypeIndex::Date:
|
||||
return BinaryTypeIndex::Date;
|
||||
case TypeIndex::Date32:
|
||||
return BinaryTypeIndex::Date32;
|
||||
case TypeIndex::DateTime:
|
||||
if (assert_cast<const DataTypeDateTime &>(*type).hasExplicitTimeZone())
|
||||
return BinaryTypeIndex::DateTimeWithTimezone;
|
||||
return BinaryTypeIndex::DateTimeUTC;
|
||||
case TypeIndex::DateTime64:
|
||||
if (assert_cast<const DataTypeDateTime64 &>(*type).hasExplicitTimeZone())
|
||||
return BinaryTypeIndex::DateTime64WithTimezone;
|
||||
return BinaryTypeIndex::DateTime64UTC;
|
||||
case TypeIndex::String:
|
||||
return BinaryTypeIndex::String;
|
||||
case TypeIndex::FixedString:
|
||||
return BinaryTypeIndex::FixedString;
|
||||
case TypeIndex::Enum8:
|
||||
return BinaryTypeIndex::Enum8;
|
||||
case TypeIndex::Enum16:
|
||||
return BinaryTypeIndex::Enum16;
|
||||
case TypeIndex::Decimal32:
|
||||
return BinaryTypeIndex::Decimal32;
|
||||
case TypeIndex::Decimal64:
|
||||
return BinaryTypeIndex::Decimal64;
|
||||
case TypeIndex::Decimal128:
|
||||
return BinaryTypeIndex::Decimal128;
|
||||
case TypeIndex::Decimal256:
|
||||
return BinaryTypeIndex::Decimal256;
|
||||
case TypeIndex::UUID:
|
||||
return BinaryTypeIndex::UUID;
|
||||
case TypeIndex::Array:
|
||||
return BinaryTypeIndex::Array;
|
||||
case TypeIndex::Tuple:
|
||||
{
|
||||
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
|
||||
if (tuple_type.haveExplicitNames())
|
||||
return BinaryTypeIndex::NamedTuple;
|
||||
return BinaryTypeIndex::UnnamedTuple;
|
||||
}
|
||||
case TypeIndex::Set:
|
||||
return BinaryTypeIndex::Set;
|
||||
case TypeIndex::Interval:
|
||||
return BinaryTypeIndex::Interval;
|
||||
case TypeIndex::Nullable:
|
||||
return BinaryTypeIndex::Nullable;
|
||||
case TypeIndex::Function:
|
||||
return BinaryTypeIndex::Function;
|
||||
case TypeIndex::AggregateFunction:
|
||||
return BinaryTypeIndex::AggregateFunction;
|
||||
case TypeIndex::LowCardinality:
|
||||
return BinaryTypeIndex::LowCardinality;
|
||||
case TypeIndex::Map:
|
||||
return BinaryTypeIndex::Map;
|
||||
case TypeIndex::Object:
|
||||
/// Object type will be deprecated and replaced by new implementation. No need to support it here.
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type Object is not supported");
|
||||
case TypeIndex::IPv4:
|
||||
return BinaryTypeIndex::IPv4;
|
||||
case TypeIndex::IPv6:
|
||||
return BinaryTypeIndex::IPv6;
|
||||
case TypeIndex::Variant:
|
||||
return BinaryTypeIndex::Variant;
|
||||
case TypeIndex::Dynamic:
|
||||
return BinaryTypeIndex::Dynamic;
|
||||
/// JSONPaths is used only during schema inference and cannot be used anywhere else.
|
||||
case TypeIndex::JSONPaths:
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of type JSONPaths is not supported");
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void encodeEnumValues(const DataTypePtr & type, WriteBuffer & buf)
|
||||
{
|
||||
const auto & enum_type = assert_cast<const DataTypeEnum<T> &>(*type);
|
||||
const auto & values = enum_type.getValues();
|
||||
writeVarUInt(values.size(), buf);
|
||||
for (const auto & [name, value] : values)
|
||||
{
|
||||
writeStringBinary(name, buf);
|
||||
writeBinaryLittleEndian(value, buf);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
DataTypePtr decodeEnum(ReadBuffer & buf)
|
||||
{
|
||||
typename DataTypeEnum<T>::Values values;
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
{
|
||||
String name;
|
||||
readStringBinary(name, buf);
|
||||
T value;
|
||||
readBinaryLittleEndian(value, buf);
|
||||
values.emplace_back(name, value);
|
||||
}
|
||||
|
||||
return std::make_shared<DataTypeEnum<T>>(values);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void encodeDecimal(const DataTypePtr & type, WriteBuffer & buf)
|
||||
{
|
||||
const auto & decimal_type = assert_cast<const DataTypeDecimal<T> &>(*type);
|
||||
/// Both precision and scale should be less than 76, so we can decode it in 1 byte.
|
||||
writeBinary(UInt8(decimal_type.getPrecision()), buf);
|
||||
writeBinary(UInt8(decimal_type.getScale()), buf);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
DataTypePtr decodeDecimal(ReadBuffer & buf)
|
||||
{
|
||||
UInt8 precision;
|
||||
readBinary(precision, buf);
|
||||
UInt8 scale;
|
||||
readBinary(scale, buf);
|
||||
return std::make_shared<DataTypeDecimal<T>>(precision, scale);
|
||||
}
|
||||
|
||||
void encodeAggregateFunction(const String & function_name, const Array & parameters, const DataTypes & arguments_types, WriteBuffer & buf)
|
||||
{
|
||||
writeStringBinary(function_name, buf);
|
||||
writeVarUInt(parameters.size(), buf);
|
||||
for (const auto & param : parameters)
|
||||
encodeField(param, buf);
|
||||
writeVarUInt(arguments_types.size(), buf);
|
||||
for (const auto & argument_type : arguments_types)
|
||||
encodeDataType(argument_type, buf);
|
||||
}
|
||||
|
||||
std::tuple<AggregateFunctionPtr, Array, DataTypes> decodeAggregateFunction(ReadBuffer & buf)
|
||||
{
|
||||
String function_name;
|
||||
readStringBinary(function_name, buf);
|
||||
size_t num_parameters;
|
||||
readVarUInt(num_parameters, buf);
|
||||
Array parameters;
|
||||
parameters.reserve(num_parameters);
|
||||
for (size_t i = 0; i != num_parameters; ++i)
|
||||
parameters.push_back(decodeField(buf));
|
||||
size_t num_arguments;
|
||||
readVarUInt(num_arguments, buf);
|
||||
DataTypes arguments_types;
|
||||
arguments_types.reserve(num_arguments);
|
||||
for (size_t i = 0; i != num_arguments; ++i)
|
||||
arguments_types.push_back(decodeDataType(buf));
|
||||
AggregateFunctionProperties properties;
|
||||
auto action = NullsAction::EMPTY;
|
||||
auto function = AggregateFunctionFactory::instance().get(function_name, action, arguments_types, parameters, properties);
|
||||
return {function, parameters, arguments_types};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void encodeDataType(const DataTypePtr & type, WriteBuffer & buf)
|
||||
{
|
||||
/// First, write the BinaryTypeIndex byte.
|
||||
auto binary_type_index = getBinaryTypeIndex(type);
|
||||
buf.write(UInt8(binary_type_index));
|
||||
/// Then, write additional information depending on the data type.
|
||||
switch (binary_type_index)
|
||||
{
|
||||
case BinaryTypeIndex::DateTimeWithTimezone:
|
||||
{
|
||||
const auto & datetime_type = assert_cast<const DataTypeDateTime &>(*type);
|
||||
writeStringBinary(datetime_type.getTimeZone().getTimeZone(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::DateTime64UTC:
|
||||
{
|
||||
const auto & datetime64_type = assert_cast<const DataTypeDateTime64 &>(*type);
|
||||
/// Maximum scale for DateTime64 is 9, so we can write it as 1 byte.
|
||||
buf.write(UInt8(datetime64_type.getScale()));
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::DateTime64WithTimezone:
|
||||
{
|
||||
const auto & datetime64_type = assert_cast<const DataTypeDateTime64 &>(*type);
|
||||
buf.write(UInt8(datetime64_type.getScale()));
|
||||
writeStringBinary(datetime64_type.getTimeZone().getTimeZone(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::FixedString:
|
||||
{
|
||||
const auto & fixed_string_type = assert_cast<const DataTypeFixedString &>(*type);
|
||||
writeVarUInt(fixed_string_type.getN(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Enum8:
|
||||
{
|
||||
encodeEnumValues<Int8>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Enum16:
|
||||
{
|
||||
encodeEnumValues<Int16>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Decimal32:
|
||||
{
|
||||
encodeDecimal<Decimal32>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Decimal64:
|
||||
{
|
||||
encodeDecimal<Decimal64>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Decimal128:
|
||||
{
|
||||
encodeDecimal<Decimal128>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Decimal256:
|
||||
{
|
||||
encodeDecimal<Decimal256>(type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Array:
|
||||
{
|
||||
const auto & array_type = assert_cast<const DataTypeArray &>(*type);
|
||||
encodeDataType(array_type.getNestedType(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::NamedTuple:
|
||||
{
|
||||
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
|
||||
const auto & types = tuple_type.getElements();
|
||||
const auto & names = tuple_type.getElementNames();
|
||||
writeVarUInt(types.size(), buf);
|
||||
for (size_t i = 0; i != types.size(); ++i)
|
||||
{
|
||||
writeStringBinary(names[i], buf);
|
||||
encodeDataType(types[i], buf);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::UnnamedTuple:
|
||||
{
|
||||
const auto & tuple_type = assert_cast<const DataTypeTuple &>(*type);
|
||||
const auto & element_types = tuple_type.getElements();
|
||||
writeVarUInt(element_types.size(), buf);
|
||||
for (const auto & element_type : element_types)
|
||||
encodeDataType(element_type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Interval:
|
||||
{
|
||||
const auto & interval_type = assert_cast<const DataTypeInterval &>(*type);
|
||||
writeBinary(UInt8(interval_type.getKind().kind), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Nullable:
|
||||
{
|
||||
const auto & nullable_type = assert_cast<const DataTypeNullable &>(*type);
|
||||
encodeDataType(nullable_type.getNestedType(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Function:
|
||||
{
|
||||
const auto & function_type = assert_cast<const DataTypeFunction &>(*type);
|
||||
const auto & arguments_types = function_type.getArgumentTypes();
|
||||
const auto & return_type = function_type.getReturnType();
|
||||
writeVarUInt(arguments_types.size(), buf);
|
||||
for (const auto & argument_type : arguments_types)
|
||||
encodeDataType(argument_type, buf);
|
||||
encodeDataType(return_type, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::LowCardinality:
|
||||
{
|
||||
const auto & low_cardinality_type = assert_cast<const DataTypeLowCardinality &>(*type);
|
||||
encodeDataType(low_cardinality_type.getDictionaryType(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Map:
|
||||
{
|
||||
const auto & map_type = assert_cast<const DataTypeMap &>(*type);
|
||||
encodeDataType(map_type.getKeyType(), buf);
|
||||
encodeDataType(map_type.getValueType(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Variant:
|
||||
{
|
||||
const auto & variant_type = assert_cast<const DataTypeVariant &>(*type);
|
||||
const auto & variants = variant_type.getVariants();
|
||||
writeVarUInt(variants.size(), buf);
|
||||
for (const auto & variant : variants)
|
||||
encodeDataType(variant, buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Dynamic:
|
||||
{
|
||||
const auto & dynamic_type = assert_cast<const DataTypeDynamic &>(*type);
|
||||
/// Maximum number of dynamic types is 255, we can write it as 1 byte.
|
||||
writeBinary(UInt8(dynamic_type.getMaxDynamicTypes()), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::AggregateFunction:
|
||||
{
|
||||
const auto & aggregate_function_type = assert_cast<const DataTypeAggregateFunction &>(*type);
|
||||
writeVarUInt(aggregate_function_type.getVersion(), buf);
|
||||
encodeAggregateFunction(aggregate_function_type.getFunctionName(), aggregate_function_type.getParameters(), aggregate_function_type.getArgumentsDataTypes(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::SimpleAggregateFunction:
|
||||
{
|
||||
const auto & simple_aggregate_function_type = assert_cast<const DataTypeCustomSimpleAggregateFunction &>(*type->getCustomName());
|
||||
encodeAggregateFunction(simple_aggregate_function_type.getFunctionName(), simple_aggregate_function_type.getParameters(), simple_aggregate_function_type.getArgumentsDataTypes(), buf);
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Nested:
|
||||
{
|
||||
const auto & nested_type = assert_cast<const DataTypeNestedCustomName &>(*type->getCustomName());
|
||||
const auto & elements = nested_type.getElements();
|
||||
const auto & names = nested_type.getNames();
|
||||
writeVarUInt(elements.size(), buf);
|
||||
for (size_t i = 0; i != elements.size(); ++i)
|
||||
{
|
||||
writeStringBinary(names[i], buf);
|
||||
encodeDataType(elements[i], buf);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case BinaryTypeIndex::Custom:
|
||||
{
|
||||
const auto & type_name = type->getName();
|
||||
writeStringBinary(type_name, buf);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
String encodeDataType(const DataTypePtr & type)
|
||||
{
|
||||
WriteBufferFromOwnString buf;
|
||||
encodeDataType(type, buf);
|
||||
return buf.str();
|
||||
}
|
||||
|
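/// Editorial sketch, not part of the committed file: the string overload
/// above and the decoder below compose into a simple round trip. The
/// declarations come from DataTypesBinaryEncoding.h, included at the top.
[[maybe_unused]] static DataTypePtr roundTripDataType(const DataTypePtr & type)
{
    auto encoded = encodeDataType(type);   /// first byte is the BinaryTypeIndex tag
    ReadBufferFromString buf(encoded);
    return decodeDataType(buf);
}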
||||
DataTypePtr decodeDataType(ReadBuffer & buf)
|
||||
{
|
||||
UInt8 type;
|
||||
readBinary(type, buf);
|
||||
switch (BinaryTypeIndex(type))
|
||||
{
|
||||
case BinaryTypeIndex::Nothing:
|
||||
return std::make_shared<DataTypeNothing>();
|
||||
case BinaryTypeIndex::UInt8:
|
||||
return std::make_shared<DataTypeUInt8>();
|
||||
case BinaryTypeIndex::Bool:
|
||||
return DataTypeFactory::instance().get("Bool");
|
||||
case BinaryTypeIndex::UInt16:
|
||||
return std::make_shared<DataTypeUInt16>();
|
||||
case BinaryTypeIndex::UInt32:
|
||||
return std::make_shared<DataTypeUInt32>();
|
||||
case BinaryTypeIndex::UInt64:
|
||||
return std::make_shared<DataTypeUInt64>();
|
||||
case BinaryTypeIndex::UInt128:
|
||||
return std::make_shared<DataTypeUInt128>();
|
||||
case BinaryTypeIndex::UInt256:
|
||||
return std::make_shared<DataTypeUInt256>();
|
||||
case BinaryTypeIndex::Int8:
|
||||
return std::make_shared<DataTypeInt8>();
|
||||
case BinaryTypeIndex::Int16:
|
||||
return std::make_shared<DataTypeInt16>();
|
||||
case BinaryTypeIndex::Int32:
|
||||
return std::make_shared<DataTypeInt32>();
|
||||
case BinaryTypeIndex::Int64:
|
||||
return std::make_shared<DataTypeInt64>();
|
||||
case BinaryTypeIndex::Int128:
|
||||
return std::make_shared<DataTypeInt128>();
|
||||
case BinaryTypeIndex::Int256:
|
||||
return std::make_shared<DataTypeInt256>();
|
||||
case BinaryTypeIndex::Float32:
|
||||
return std::make_shared<DataTypeFloat32>();
|
||||
case BinaryTypeIndex::Float64:
|
||||
return std::make_shared<DataTypeFloat64>();
|
||||
case BinaryTypeIndex::Date:
|
||||
return std::make_shared<DataTypeDate>();
|
||||
case BinaryTypeIndex::Date32:
|
||||
return std::make_shared<DataTypeDate32>();
|
||||
case BinaryTypeIndex::DateTimeUTC:
|
||||
return std::make_shared<DataTypeDateTime>();
|
||||
case BinaryTypeIndex::DateTimeWithTimezone:
|
||||
{
|
||||
String time_zone;
|
||||
readStringBinary(time_zone, buf);
|
||||
return std::make_shared<DataTypeDateTime>(time_zone);
|
||||
}
|
||||
case BinaryTypeIndex::DateTime64UTC:
|
||||
{
|
||||
UInt8 scale;
|
||||
readBinary(scale, buf);
|
||||
return std::make_shared<DataTypeDateTime64>(scale);
|
||||
}
|
||||
case BinaryTypeIndex::DateTime64WithTimezone:
|
||||
{
|
||||
UInt8 scale;
|
||||
readBinary(scale, buf);
|
||||
String time_zone;
|
||||
readStringBinary(time_zone, buf);
|
||||
return std::make_shared<DataTypeDateTime64>(scale, time_zone);
|
||||
}
|
||||
case BinaryTypeIndex::String:
|
||||
return std::make_shared<DataTypeString>();
|
||||
case BinaryTypeIndex::FixedString:
|
||||
{
|
||||
UInt64 size;
|
||||
readVarUInt(size, buf);
|
||||
return std::make_shared<DataTypeFixedString>(size);
|
||||
}
|
||||
case BinaryTypeIndex::Enum8:
|
||||
return decodeEnum<Int8>(buf);
|
||||
case BinaryTypeIndex::Enum16:
|
||||
return decodeEnum<Int16>(buf);
|
||||
case BinaryTypeIndex::Decimal32:
|
||||
return decodeDecimal<Decimal32>(buf);
|
||||
case BinaryTypeIndex::Decimal64:
|
||||
return decodeDecimal<Decimal64>(buf);
|
||||
case BinaryTypeIndex::Decimal128:
|
||||
return decodeDecimal<Decimal128>(buf);
|
||||
case BinaryTypeIndex::Decimal256:
|
||||
return decodeDecimal<Decimal256>(buf);
|
||||
case BinaryTypeIndex::UUID:
|
||||
return std::make_shared<DataTypeUUID>();
|
||||
case BinaryTypeIndex::Array:
|
||||
return std::make_shared<DataTypeArray>(decodeDataType(buf));
|
||||
case BinaryTypeIndex::NamedTuple:
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
DataTypes elements;
|
||||
elements.reserve(size);
|
||||
Names names;
|
||||
names.reserve(size);
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
{
|
||||
names.emplace_back();
|
||||
readStringBinary(names.back(), buf);
|
||||
elements.push_back(decodeDataType(buf));
|
||||
}
|
||||
|
||||
return std::make_shared<DataTypeTuple>(elements, names);
|
||||
}
|
||||
case BinaryTypeIndex::UnnamedTuple:
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
DataTypes elements;
|
||||
elements.reserve(size);
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
elements.push_back(decodeDataType(buf));
|
||||
return std::make_shared<DataTypeTuple>(elements);
|
||||
}
|
||||
case BinaryTypeIndex::Set:
|
||||
return std::make_shared<DataTypeSet>();
|
||||
case BinaryTypeIndex::Interval:
|
||||
{
|
||||
UInt8 kind;
|
||||
readBinary(kind, buf);
|
||||
return std::make_shared<DataTypeInterval>(IntervalKind(IntervalKind::Kind(kind)));
|
||||
}
|
||||
case BinaryTypeIndex::Nullable:
|
||||
return std::make_shared<DataTypeNullable>(decodeDataType(buf));
|
||||
case BinaryTypeIndex::Function:
|
||||
{
|
||||
size_t arguments_size;
|
||||
readVarUInt(arguments_size, buf);
|
||||
DataTypes arguments;
|
||||
arguments.reserve(arguments_size);
|
||||
for (size_t i = 0; i != arguments_size; ++i)
|
||||
arguments.push_back(decodeDataType(buf));
|
||||
auto return_type = decodeDataType(buf);
|
||||
return std::make_shared<DataTypeFunction>(arguments, return_type);
|
||||
}
|
||||
case BinaryTypeIndex::LowCardinality:
|
||||
return std::make_shared<DataTypeLowCardinality>(decodeDataType(buf));
|
||||
case BinaryTypeIndex::Map:
|
||||
{
|
||||
auto key_type = decodeDataType(buf);
|
||||
auto value_type = decodeDataType(buf);
|
||||
return std::make_shared<DataTypeMap>(key_type, value_type);
|
||||
}
|
||||
case BinaryTypeIndex::IPv4:
|
||||
return std::make_shared<DataTypeIPv4>();
|
||||
case BinaryTypeIndex::IPv6:
|
||||
return std::make_shared<DataTypeIPv6>();
|
||||
case BinaryTypeIndex::Variant:
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
DataTypes variants;
|
||||
variants.reserve(size);
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
variants.push_back(decodeDataType(buf));
|
||||
return std::make_shared<DataTypeVariant>(variants);
|
||||
}
|
||||
case BinaryTypeIndex::Dynamic:
|
||||
{
|
||||
UInt8 max_dynamic_types;
|
||||
readBinary(max_dynamic_types, buf);
|
||||
return std::make_shared<DataTypeDynamic>(max_dynamic_types);
|
||||
}
|
||||
case BinaryTypeIndex::AggregateFunction:
|
||||
{
|
||||
size_t version;
|
||||
readVarUInt(version, buf);
|
||||
const auto & [function, parameters, arguments_types] = decodeAggregateFunction(buf);
|
||||
return std::make_shared<DataTypeAggregateFunction>(function, arguments_types, parameters, version);
|
||||
}
|
||||
case BinaryTypeIndex::SimpleAggregateFunction:
|
||||
{
|
||||
const auto & [function, parameters, arguments_types] = decodeAggregateFunction(buf);
|
||||
return createSimpleAggregateFunctionType(function, arguments_types, parameters);
|
||||
}
|
||||
case BinaryTypeIndex::Nested:
|
||||
{
|
||||
size_t size;
|
||||
readVarUInt(size, buf);
|
||||
Names names;
|
||||
names.reserve(size);
|
||||
DataTypes elements;
|
||||
elements.reserve(size);
|
||||
for (size_t i = 0; i != size; ++i)
|
||||
{
|
||||
names.emplace_back();
|
||||
readStringBinary(names.back(), buf);
|
||||
elements.push_back(decodeDataType(buf));
|
||||
}
|
||||
|
||||
return createNested(elements, names);
|
||||
}
|
||||
case BinaryTypeIndex::Custom:
|
||||
{
|
||||
String type_name;
|
||||
readStringBinary(type_name, buf);
|
||||
return DataTypeFactory::instance().get(type_name);
|
||||
}
|
||||
}
|
||||
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown type code: {0:#04x}", UInt64(type));
|
||||
}
|
||||
|
||||
DataTypePtr decodeDataType(const String & data)
|
||||
{
|
||||
ReadBufferFromString buf(data);
|
||||
return decodeDataType(buf);
|
||||
}
|
||||
|
||||
}
|
118
src/DataTypes/DataTypesBinaryEncoding.h
Normal file
118
src/DataTypes/DataTypesBinaryEncoding.h
Normal file
@ -0,0 +1,118 @@
#pragma once

#include <DataTypes/IDataType.h>

namespace DB
{

/**

Binary encoding for ClickHouse data types:
|------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| ClickHouse data type                                                                | Binary encoding                                                                                                          |
|------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| Nothing | 0x00 |
| UInt8 | 0x01 |
| UInt16 | 0x02 |
| UInt32 | 0x03 |
| UInt64 | 0x04 |
| UInt128 | 0x05 |
| UInt256 | 0x06 |
| Int8 | 0x07 |
| Int16 | 0x08 |
| Int32 | 0x09 |
| Int64 | 0x0A |
| Int128 | 0x0B |
| Int256 | 0x0C |
| Float32 | 0x0D |
| Float64 | 0x0E |
| Date | 0x0F |
| Date32 | 0x10 |
| DateTime | 0x11 |
| DateTime(time_zone) | 0x12<var_uint_time_zone_name_size><time_zone_name_data> |
| DateTime64(P) | 0x13<uint8_precision> |
| DateTime64(P, time_zone) | 0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data> |
| String | 0x15 |
| FixedString(N) | 0x16<var_uint_size> |
| Enum8 | 0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N> |
| Enum16 | 0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...<var_uint_name_size_N><name_data_N><int16_little_endian_value_N> |
| Decimal32(P, S) | 0x19<uint8_precision><uint8_scale> |
| Decimal64(P, S) | 0x1A<uint8_precision><uint8_scale> |
| Decimal128(P, S) | 0x1B<uint8_precision><uint8_scale> |
| Decimal256(P, S) | 0x1C<uint8_precision><uint8_scale> |
| UUID | 0x1D |
| Array(T) | 0x1E<nested_type_encoding> |
| Tuple(T1, ..., TN) | 0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N> |
| Tuple(name1 T1, ..., nameN TN) | 0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N> |
| Set | 0x21 |
| Interval | 0x22<interval_kind> |
| Nullable(T) | 0x23<nested_type_encoding> |
| Function | 0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding> |
| AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N> |
| LowCardinality(T) | 0x26<nested_type_encoding> |
| Map(K, V) | 0x27<key_type_encoding><value_type_encoding> |
| IPv4 | 0x28 |
| IPv6 | 0x29 |
| Variant(T1, ..., TN) | 0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N> |
| Dynamic(max_types=N) | 0x2B<uint8_max_types> |
| Custom type (Ring, Polygon, etc) | 0x2C<var_uint_type_name_size><type_name_data> |
| Bool | 0x2D |
| SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN) | 0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N> |
| Nested(name1 T1, ..., nameN TN) | 0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N> |
|------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
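
For example, reading the table above: Map(String, UInt32) is encoded as the three bytes 0x27 0x15 0x03
(0x27 for Map, followed by the encodings of its key type String, 0x15, and value type UInt32, 0x03),
and Nullable(Int64) as the two bytes 0x23 0x0A.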

Interval kind binary encoding:
|---------------|-----------------|
| Interval kind | Binary encoding |
|---------------|-----------------|
| Nanosecond | 0x00 |
| Microsecond | 0x01 |
| Millisecond | 0x02 |
| Second | 0x03 |
| Minute | 0x04 |
| Hour | 0x05 |
| Day | 0x06 |
| Week | 0x07 |
| Month | 0x08 |
| Quarter | 0x09 |
| Year | 0x0A |
|---------------|-----------------|
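
For example, an Interval type with kind Hour (IntervalHour) is encoded as the two bytes 0x22 0x05:
the Interval code followed by the kind byte for Hour.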

Aggregate function parameter binary encoding (binary encoding of a Field, see src/Common/FieldBinaryEncoding.h):
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| Parameter type         | Binary encoding                                                                                                              |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| Null | 0x00 |
| UInt64 | 0x01<var_uint_value> |
| Int64 | 0x02<var_int_value> |
| UInt128 | 0x03<uint128_little_endian_value> |
| Int128 | 0x04<int128_little_endian_value> |
| UInt256 | 0x05<uint256_little_endian_value> |
| Int256 | 0x06<int256_little_endian_value> |
| Float64 | 0x07<float64_little_endian_value> |
| Decimal32 | 0x08<var_uint_scale><int32_little_endian_value> |
| Decimal64 | 0x09<var_uint_scale><int64_little_endian_value> |
| Decimal128 | 0x0A<var_uint_scale><int128_little_endian_value> |
| Decimal256 | 0x0B<var_uint_scale><int256_little_endian_value> |
| String | 0x0C<var_uint_size><data> |
| Array | 0x0D<var_uint_size><value_encoding_1>...<value_encoding_N> |
| Tuple | 0x0E<var_uint_size><value_encoding_1>...<value_encoding_N> |
| Map | 0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N> |
| IPv4 | 0x10<uint32_little_endian_value> |
| IPv6 | 0x11<uint128_little_endian_value> |
| UUID | 0x12<uuid_value> |
| Bool | 0x13<bool_value> |
| Object | 0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N> |
| AggregateFunctionState | 0x15<var_uint_name_size><name_data><var_uint_data_size><data> |
| Negative infinity | 0xFE |
| Positive infinity | 0xFF |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
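
For example, the aggregate function parameter 0.5 (a Float64) is encoded as 0x07 followed by the
8 little-endian bytes of its IEEE-754 representation, and the parameter 'abc' (a String) as
0x0C 0x03 followed by the 3 bytes of the string data.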
*/

String encodeDataType(const DataTypePtr & type);
void encodeDataType(const DataTypePtr & type, WriteBuffer & buf);

DataTypePtr decodeDataType(const String & data);
DataTypePtr decodeDataType(ReadBuffer & buf);

}
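
A minimal round-trip sketch of this API (not part of the commit itself; it mirrors what the unit
test added later in this diff does, and assumes the same headers):

#include <DataTypes/DataTypesBinaryEncoding.h>
#include <DataTypes/DataTypeFactory.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>

using namespace DB;

/// Encode a type into its compact binary form and decode it back.
DataTypePtr roundTrip(const DataTypePtr & type)
{
    WriteBufferFromOwnString ostr;
    encodeDataType(type, ostr);   /// e.g. Map(String, UInt32) -> 0x27 0x15 0x03

    ReadBufferFromString istr(ostr.str());
    return decodeDataType(istr);  /// reconstructs an equivalent DataTypePtr
}

Usage: roundTrip(DataTypeFactory::instance().get("Map(String, UInt32)"))->getName() yields "Map(String, UInt32)".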
@ -257,6 +257,9 @@ public:

        bool position_independent_encoding = true;

        /// True if data type names should be serialized in binary encoding.
        bool data_types_binary_encoding = false;

        bool use_compact_variant_discriminators_serialization = false;

        enum class DynamicStatisticsMode
@ -278,6 +281,9 @@ public:

        bool position_independent_encoding = true;

        /// True if data type names should be deserialized in binary encoding.
        bool data_types_binary_encoding = false;

        bool native_format = false;

        /// If not zero, may be used to avoid reallocations while reading column of String type.
@ -42,13 +42,13 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr, con
{
    size_t size;
    readVarUInt(size, istr);
    if (settings.max_binary_array_size && size > settings.max_binary_array_size)
    if (settings.binary.max_binary_array_size && size > settings.binary.max_binary_array_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_ARRAY_SIZE,
            "Too large array size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_array_size",
            size,
            settings.max_binary_array_size);
            settings.binary.max_binary_array_size);

    field = Array();
    Array & arr = field.get<Array &>();
@ -82,13 +82,13 @@ void SerializationArray::deserializeBinary(IColumn & column, ReadBuffer & istr,

    size_t size;
    readVarUInt(size, istr);
    if (settings.max_binary_array_size && size > settings.max_binary_array_size)
    if (settings.binary.max_binary_array_size && size > settings.binary.max_binary_array_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_ARRAY_SIZE,
            "Too large array size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_array_size",
            size,
            settings.binary.max_binary_array_size);

    IColumn & nested_column = column_array.getData();
@ -4,6 +4,8 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeVariant.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypesBinaryEncoding.h>

#include <Columns/ColumnDynamic.h>
#include <Columns/ColumnString.h>
@ -109,7 +111,10 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix(
    const auto & variant_column = column_dynamic.getVariantColumn();

    /// Write internal Variant type name.
    writeStringBinary(dynamic_state->variant_type->getName(), *stream);
    if (settings.data_types_binary_encoding)
        encodeDataType(dynamic_state->variant_type, *stream);
    else
        writeStringBinary(dynamic_state->variant_type->getName(), *stream);

    /// Write statistics in prefix if needed.
    if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX)
@ -178,9 +183,16 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD
    readBinaryLittleEndian(structure_version, *structure_stream);
    auto structure_state = std::make_shared<DeserializeBinaryBulkStateDynamicStructure>(structure_version);
    /// Read internal Variant type name.
    String data_type_name;
    readStringBinary(data_type_name, *structure_stream);
    structure_state->variant_type = DataTypeFactory::instance().get(data_type_name);
    if (settings.data_types_binary_encoding)
    {
        structure_state->variant_type = decodeDataType(*structure_stream);
    }
    else
    {
        String data_type_name;
        readStringBinary(data_type_name, *structure_stream);
        structure_state->variant_type = DataTypeFactory::instance().get(data_type_name);
    }
    const auto * variant_type = typeid_cast<const DataTypeVariant *>(structure_state->variant_type.get());
    if (!variant_type)
        throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type of Dynamic nested column, expected Variant, got {}", structure_state->variant_type->getName());
@ -280,33 +292,27 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams(

void SerializationDynamic::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const
{
    UInt8 null_bit = field.isNull();
    writeBinary(null_bit, ostr);
    if (null_bit)
    /// Serialize NULL as Nothing type with no value.
    if (field.isNull())
    {
        encodeDataType(std::make_shared<DataTypeNothing>(), ostr);
        return;
    }

    auto field_type = applyVisitor(FieldToDataType(), field);
    auto field_type_name = field_type->getName();
    writeVarUInt(field_type_name.size(), ostr);
    writeString(field_type_name, ostr);
    encodeDataType(field_type, ostr);
    field_type->getDefaultSerialization()->serializeBinary(field, ostr, settings);
}

void SerializationDynamic::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings & settings) const
{
    UInt8 null_bit;
    readBinary(null_bit, istr);
    if (null_bit)
    auto field_type = decodeDataType(istr);
    if (isNothing(field_type))
    {
        field = Null();
        return;
    }

    size_t field_type_name_size;
    readVarUInt(field_type_name_size, istr);
    String field_type_name(field_type_name_size, 0);
    istr.readStrict(field_type_name.data(), field_type_name_size);
    auto field_type = DataTypeFactory::instance().get(field_type_name);
    field_type->getDefaultSerialization()->deserializeBinary(field, istr, settings);
}

@ -317,15 +323,15 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu
    const auto & variant_column = dynamic_column.getVariantColumn();
    auto global_discr = variant_column.globalDiscriminatorAt(row_num);

    UInt8 null_bit = global_discr == ColumnVariant::NULL_DISCRIMINATOR;
    writeBinary(null_bit, ostr);
    if (null_bit)
    /// Serialize NULL as Nothing type with no value.
    if (global_discr == ColumnVariant::NULL_DISCRIMINATOR)
    {
        encodeDataType(std::make_shared<DataTypeNothing>(), ostr);
        return;
    }

    const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(global_discr);
    const auto & variant_type_name = variant_info.variant_names[global_discr];
    writeVarUInt(variant_type_name.size(), ostr);
    writeString(variant_type_name, ostr);
    encodeDataType(variant_type, ostr);
    variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings);
}

@ -346,30 +352,23 @@ static void deserializeVariant(
void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const
{
    auto & dynamic_column = assert_cast<ColumnDynamic &>(column);
    UInt8 null_bit;
    readBinary(null_bit, istr);
    if (null_bit)
    auto variant_type = decodeDataType(istr);
    if (isNothing(variant_type))
    {
        dynamic_column.insertDefault();
        return;
    }

    size_t variant_type_name_size;
    readVarUInt(variant_type_name_size, istr);
    String variant_type_name(variant_type_name_size, 0);
    istr.readStrict(variant_type_name.data(), variant_type_name_size);

    auto variant_type_name = variant_type->getName();
    const auto & variant_info = dynamic_column.getVariantInfo();
    auto it = variant_info.variant_name_to_discriminator.find(variant_type_name);
    if (it != variant_info.variant_name_to_discriminator.end())
    {
        const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(it->second);
        deserializeVariant(dynamic_column.getVariantColumn(), variant_type, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); });
        return;
    }

    /// We don't have this variant yet. Let's try to add it.
    auto variant_type = DataTypeFactory::instance().get(variant_type_name);
    if (dynamic_column.addNewVariant(variant_type))
    {
        auto discr = variant_info.variant_name_to_discriminator.at(variant_type_name);
@ -55,13 +55,13 @@ void SerializationMap::deserializeBinary(Field & field, ReadBuffer & istr, const
{
    size_t size;
    readVarUInt(size, istr);
    if (settings.max_binary_array_size && size > settings.max_binary_array_size)
    if (settings.binary.max_binary_array_size && size > settings.binary.max_binary_array_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_ARRAY_SIZE,
            "Too large map size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_array_size",
            size,
            settings.max_binary_array_size);
            settings.binary.max_binary_array_size);
    field = Map();
    Map & map = field.get<Map &>();
    map.reserve(size);
@ -33,13 +33,13 @@ namespace ErrorCodes
void SerializationString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const
{
    const String & s = field.get<const String &>();
    if (settings.max_binary_string_size && s.size() > settings.max_binary_string_size)
    if (settings.binary.max_binary_string_size && s.size() > settings.binary.max_binary_string_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_STRING_SIZE,
            "Too large string size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_string_size",
            s.size(),
            settings.max_binary_string_size);
            settings.binary.max_binary_string_size);

    writeVarUInt(s.size(), ostr);
    writeString(s, ostr);
@ -50,13 +50,13 @@ void SerializationString::deserializeBinary(Field & field, ReadBuffer & istr, co
{
    UInt64 size;
    readVarUInt(size, istr);
    if (settings.max_binary_string_size && size > settings.max_binary_string_size)
    if (settings.binary.max_binary_string_size && size > settings.binary.max_binary_string_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_STRING_SIZE,
            "Too large string size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_string_size",
            size,
            settings.max_binary_string_size);
            settings.binary.max_binary_string_size);

    field = String();
    String & s = field.get<String &>();
@ -68,13 +68,13 @@ void SerializationString::deserializeBinary(Field & field, ReadBuffer & istr, co
void SerializationString::serializeBinary(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const
{
    const StringRef & s = assert_cast<const ColumnString &>(column).getDataAt(row_num);
    if (settings.max_binary_string_size && s.size > settings.max_binary_string_size)
    if (settings.binary.max_binary_string_size && s.size > settings.binary.max_binary_string_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_STRING_SIZE,
            "Too large string size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_string_size",
            s.size,
            settings.max_binary_string_size);
            settings.binary.max_binary_string_size);

    writeVarUInt(s.size, ostr);
    writeString(s, ostr);
@ -89,13 +89,13 @@ void SerializationString::deserializeBinary(IColumn & column, ReadBuffer & istr,

    UInt64 size;
    readVarUInt(size, istr);
    if (settings.max_binary_string_size && size > settings.max_binary_string_size)
    if (settings.binary.max_binary_string_size && size > settings.binary.max_binary_string_size)
        throw Exception(
            ErrorCodes::TOO_LARGE_STRING_SIZE,
            "Too large string size: {}. The maximum is: {}. To increase the maximum, use setting "
            "format_binary_max_string_size",
            size,
            settings.max_binary_string_size);
            settings.binary.max_binary_string_size);

    size_t old_chars_size = data.size();
    size_t offset = old_chars_size + size + 1;
@ -72,8 +72,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
    DataTypePtr type = DataTypeFactory::instance().get(data_type);

    FormatSettings settings;
    settings.max_binary_string_size = 100;
    settings.max_binary_array_size = 100;
    settings.binary.max_binary_string_size = 100;
    settings.binary.max_binary_array_size = 100;

    Field field;
    type->getDefaultSerialization()->deserializeBinary(field, in, settings);
129
src/DataTypes/tests/gtest_data_types_binary_encoding.cpp
Normal file
129
src/DataTypes/tests/gtest_data_types_binary_encoding.cpp
Normal file
@ -0,0 +1,129 @@
#include <gtest/gtest.h>
#include <Core/Field.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypeFixedString.h>
#include <DataTypes/DataTypeEnum.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypeFunction.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeUUID.h>
#include <DataTypes/DataTypeSet.h>
#include <DataTypes/DataTypeInterval.h>
#include <DataTypes/DataTypeIPv4andIPv6.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypeNothing.h>
#include <DataTypes/DataTypeDynamic.h>
#include <DataTypes/DataTypeFactory.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <AggregateFunctions/AggregateFunctionFactory.h>
#include <AggregateFunctions/registerAggregateFunctions.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>
#include <Common/tests/gtest_global_register.h>

using namespace DB;

namespace DB::ErrorCodes
{
extern const int UNSUPPORTED_METHOD;
}


void check(const DataTypePtr & type)
{
//    std::cerr << "Check " << type->getName() << "\n";
    WriteBufferFromOwnString ostr;
    encodeDataType(type, ostr);
    ReadBufferFromString istr(ostr.str());
    DataTypePtr decoded_type = decodeDataType(istr);
    ASSERT_TRUE(istr.eof());
    ASSERT_EQ(type->getName(), decoded_type->getName());
    ASSERT_TRUE(type->equals(*decoded_type));
}

GTEST_TEST(DataTypesBinaryEncoding, EncodeAndDecode)
{
    tryRegisterAggregateFunctions();
    check(std::make_shared<DataTypeNothing>());
    check(std::make_shared<DataTypeInt8>());
    check(std::make_shared<DataTypeUInt8>());
    check(std::make_shared<DataTypeInt16>());
    check(std::make_shared<DataTypeUInt16>());
    check(std::make_shared<DataTypeInt32>());
    check(std::make_shared<DataTypeUInt32>());
    check(std::make_shared<DataTypeInt64>());
    check(std::make_shared<DataTypeUInt64>());
    check(std::make_shared<DataTypeInt128>());
    check(std::make_shared<DataTypeUInt128>());
    check(std::make_shared<DataTypeInt256>());
    check(std::make_shared<DataTypeUInt256>());
    check(std::make_shared<DataTypeFloat32>());
    check(std::make_shared<DataTypeFloat64>());
    check(std::make_shared<DataTypeDate>());
    check(std::make_shared<DataTypeDate32>());
    check(std::make_shared<DataTypeDateTime>());
    check(std::make_shared<DataTypeDateTime>("EST"));
    check(std::make_shared<DataTypeDateTime>("CET"));
    check(std::make_shared<DataTypeDateTime64>(3));
    check(std::make_shared<DataTypeDateTime64>(3, "EST"));
    check(std::make_shared<DataTypeDateTime64>(3, "CET"));
    check(std::make_shared<DataTypeString>());
    check(std::make_shared<DataTypeFixedString>(10));
    check(DataTypeFactory::instance().get("Enum8('a' = 1, 'b' = 2, 'c' = 3, 'd' = -128)"));
    check(DataTypeFactory::instance().get("Enum16('a' = 1, 'b' = 2, 'c' = 3, 'd' = -1000)"));
    check(std::make_shared<DataTypeDecimal32>(3, 6));
    check(std::make_shared<DataTypeDecimal64>(3, 6));
    check(std::make_shared<DataTypeDecimal128>(3, 6));
    check(std::make_shared<DataTypeDecimal256>(3, 6));
    check(std::make_shared<DataTypeUUID>());
    check(DataTypeFactory::instance().get("Array(UInt32)"));
    check(DataTypeFactory::instance().get("Array(Array(Array(UInt32)))"));
    check(DataTypeFactory::instance().get("Tuple(UInt32, String, UUID)"));
    check(DataTypeFactory::instance().get("Tuple(UInt32, String, Tuple(UUID, Date, IPv4))"));
    check(DataTypeFactory::instance().get("Tuple(c1 UInt32, c2 String, c3 UUID)"));
    check(DataTypeFactory::instance().get("Tuple(c1 UInt32, c2 String, c3 Tuple(c4 UUID, c5 Date, c6 IPv4))"));
    check(std::make_shared<DataTypeSet>());
    check(std::make_shared<DataTypeInterval>(IntervalKind::Kind::Nanosecond));
    check(std::make_shared<DataTypeInterval>(IntervalKind::Kind::Microsecond));
    check(DataTypeFactory::instance().get("Nullable(UInt32)"));
    check(DataTypeFactory::instance().get("Nullable(Nothing)"));
    check(DataTypeFactory::instance().get("Nullable(UUID)"));
    check(std::make_shared<DataTypeFunction>(
        DataTypes{
            std::make_shared<DataTypeInt8>(),
            std::make_shared<DataTypeDate>(),
            DataTypeFactory::instance().get("Array(Array(Array(UInt32)))")},
        DataTypeFactory::instance().get("Tuple(c1 UInt32, c2 String, c3 UUID)")));
    DataTypes argument_types = {std::make_shared<DataTypeUInt64>()};
    Array parameters = {Field(0.1), Field(0.2)};
    AggregateFunctionProperties properties;
    AggregateFunctionPtr function = AggregateFunctionFactory::instance().get("quantiles", NullsAction::EMPTY, argument_types, parameters, properties);
    check(std::make_shared<DataTypeAggregateFunction>(function, argument_types, parameters));
    check(std::make_shared<DataTypeAggregateFunction>(function, argument_types, parameters, 2));
    check(DataTypeFactory::instance().get("AggregateFunction(sum, UInt64)"));
    check(DataTypeFactory::instance().get("AggregateFunction(quantiles(0.5, 0.9), UInt64)"));
    check(DataTypeFactory::instance().get("AggregateFunction(sequenceMatch('(?1)(?2)'), Date, UInt8, UInt8)"));
    check(DataTypeFactory::instance().get("AggregateFunction(sumMapFiltered([1, 4, 8]), Array(UInt64), Array(UInt64))"));
    check(DataTypeFactory::instance().get("LowCardinality(UInt32)"));
    check(DataTypeFactory::instance().get("LowCardinality(Nullable(String))"));
    check(DataTypeFactory::instance().get("Map(String, UInt32)"));
    check(DataTypeFactory::instance().get("Map(String, Map(String, Map(String, UInt32)))"));
    check(std::make_shared<DataTypeIPv4>());
    check(std::make_shared<DataTypeIPv6>());
    check(DataTypeFactory::instance().get("Variant(String, UInt32, Date32)"));
    check(std::make_shared<DataTypeDynamic>());
    check(std::make_shared<DataTypeDynamic>(10));
    check(std::make_shared<DataTypeDynamic>(255));
    check(DataTypeFactory::instance().get("Bool"));
    check(DataTypeFactory::instance().get("SimpleAggregateFunction(sum, UInt64)"));
    check(DataTypeFactory::instance().get("SimpleAggregateFunction(maxMap, Tuple(Array(UInt32), Array(UInt32)))"));
    check(DataTypeFactory::instance().get("SimpleAggregateFunction(groupArrayArray(19), Array(UInt64))"));
    check(DataTypeFactory::instance().get("Nested(a UInt32, b UInt32)"));
    check(DataTypeFactory::instance().get("Nested(a UInt32, b Nested(c String, d Nested(e Date)))"));
    check(DataTypeFactory::instance().get("Ring"));
    check(DataTypeFactory::instance().get("Point"));
    check(DataTypeFactory::instance().get("Polygon"));
    check(DataTypeFactory::instance().get("MultiPolygon"));
    check(DataTypeFactory::instance().get("Tuple(Map(LowCardinality(String), Array(AggregateFunction(2, quantiles(0.1, 0.2), Float32))), Array(Array(Tuple(UInt32, Tuple(a Map(String, String), b Nullable(Date), c Variant(Tuple(g String, d Array(UInt32)), Date, Map(String, String)))))))"));
}
@ -1,4 +1,5 @@
#include <filesystem>
#include <base/isSharedPtrUnique.h>
#include <Databases/DatabaseAtomic.h>
#include <Databases/DatabaseFactory.h>
#include <Databases/DatabaseOnDisk.h>
@ -12,7 +13,7 @@
#include <Interpreters/ExternalDictionariesLoader.h>
#include <Parsers/formatAST.h>
#include <Storages/StorageMaterializedView.h>
#include "Common/logger_useful.h"
#include <Common/logger_useful.h>
#include <Common/PoolId.h>
#include <Common/atomicRename.h>
#include <Common/filesystemHelpers.h>
@ -397,7 +398,7 @@ DatabaseAtomic::DetachedTables DatabaseAtomic::cleanupDetachedTables()
    LOG_DEBUG(log, "There are {} detached tables. Start searching non used tables.", detached_tables.size());
    while (it != detached_tables.end())
    {
        if (it->second.unique())
        if (isSharedPtrUnique(it->second))
        {
            not_in_use.emplace(it->first, it->second);
            it = detached_tables.erase(it);
@ -1,6 +1,7 @@
#include <Databases/DatabaseLazy.h>

#include <base/sort.h>
#include <base/isSharedPtrUnique.h>
#include <iomanip>
#include <filesystem>
#include <Common/CurrentMetrics.h>
@ -305,7 +306,7 @@ try
        String table_name = expired_tables.front().table_name;
        auto it = tables_cache.find(table_name);

        if (!it->second.table || it->second.table.unique())
        if (!it->second.table || isSharedPtrUnique(it->second.table))
        {
            LOG_DEBUG(log, "Drop table {} from cache.", backQuote(it->first));
            it->second.table.reset();
@ -2,6 +2,7 @@

#if USE_MYSQL
#    include <string>
#    include <base/isSharedPtrUnique.h>
#    include <Databases/DatabaseFactory.h>
#    include <DataTypes/DataTypeDateTime.h>
#    include <DataTypes/DataTypeNullable.h>
@ -354,7 +355,7 @@ void DatabaseMySQL::cleanOutdatedTables()
{
    for (auto iterator = outdated_tables.begin(); iterator != outdated_tables.end();)
    {
        if (!iterator->unique())
        if (!isSharedPtrUnique(*iterator))
            ++iterator;
        else
        {
@ -716,6 +716,16 @@ static void writeFieldsToColumn(

            null_map_column->insertValue(0);
        }
        else
        {
            // Column is not null but field is null. It's possible due to overrides
            if (field.isNull())
            {
                column_to.insertDefault();
                return false;
            }
        }


        return true;
    };
@ -791,7 +801,7 @@ static void writeFieldsToColumn(

            if (write_data_to_null_map(value, index))
            {
                const String & data = value.get<const String &>();
                const String & data = value.safeGet<const String &>();
                casted_string_column->insertData(data.data(), data.size());
            }
        }
@ -529,7 +529,12 @@ void registerDatabaseMaterializedPostgreSQL(DatabaseFactory & factory)
        }

        auto connection_info = postgres::formatConnectionString(
            configuration.database, configuration.host, configuration.port, configuration.username, configuration.password);
            configuration.database,
            configuration.host,
            configuration.port,
            configuration.username,
            configuration.password,
            args.context->getSettingsRef().postgresql_connection_attempt_timeout);

        auto postgresql_replica_settings = std::make_unique<MaterializedPostgreSQLSettings>();
        if (engine_define->settings)
@ -545,8 +545,9 @@ void registerDatabasePostgreSQL(DatabaseFactory & factory)
            configuration,
            settings.postgresql_connection_pool_size,
            settings.postgresql_connection_pool_wait_timeout,
            POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
            settings.postgresql_connection_pool_auto_close_connection);
            settings.postgresql_connection_pool_retries,
            settings.postgresql_connection_pool_auto_close_connection,
            settings.postgresql_connection_attempt_timeout);

        return std::make_shared<DatabasePostgreSQL>(
            args.context,
@ -205,8 +205,9 @@ void registerDictionarySourcePostgreSQL(DictionarySourceFactory & factory)
            configuration.replicas_configurations,
            settings.postgresql_connection_pool_size,
            settings.postgresql_connection_pool_wait_timeout,
            POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES,
            settings.postgresql_connection_pool_auto_close_connection);
            settings.postgresql_connection_pool_retries,
            settings.postgresql_connection_pool_auto_close_connection,
            settings.postgresql_connection_attempt_timeout);

        PostgreSQLDictionarySource::Configuration dictionary_configuration
        {
@ -439,13 +439,15 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
        case FormatSettings::EscapingRule::CSV:
            result += fmt::format(
                ", use_best_effort_in_schema_inference={}, bool_true_representation={}, bool_false_representation={},"
                " null_representation={}, delimiter={}, tuple_delimiter={}",
                " null_representation={}, delimiter={}, tuple_delimiter={}, try_infer_numbers_from_strings={}, try_infer_strings_from_quoted_tuples={}",
                settings.csv.use_best_effort_in_schema_inference,
                settings.bool_true_representation,
                settings.bool_false_representation,
                settings.csv.null_representation,
                settings.csv.delimiter,
                settings.csv.tuple_delimiter);
                settings.csv.tuple_delimiter,
                settings.csv.try_infer_numbers_from_strings,
                settings.csv.try_infer_strings_from_quoted_tuples);
            break;
        case FormatSettings::EscapingRule::JSON:
            result += fmt::format(
@ -243,6 +243,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
    format_settings.orc.output_row_index_stride = settings.output_format_orc_row_index_stride;
    format_settings.orc.use_fast_decoder = settings.input_format_orc_use_fast_decoder;
    format_settings.orc.filter_push_down = settings.input_format_orc_filter_push_down;
    format_settings.orc.read_use_writer_time_zone = settings.input_format_orc_read_use_writer_time_zone;
    format_settings.defaults_for_omitted_fields = settings.input_format_defaults_for_omitted_fields;
    format_settings.capn_proto.enum_comparing_mode = settings.format_capn_proto_enum_comparising_mode;
    format_settings.capn_proto.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_capn_proto_skip_fields_with_unsupported_types_in_schema_inference;
@ -269,9 +270,13 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
    format_settings.markdown.escape_special_characters = settings.output_format_markdown_escape_special_characters;
    format_settings.bson.output_string_as_string = settings.output_format_bson_string_as_string;
    format_settings.bson.skip_fields_with_unsupported_types_in_schema_inference = settings.input_format_bson_skip_fields_with_unsupported_types_in_schema_inference;
    format_settings.max_binary_string_size = settings.format_binary_max_string_size;
    format_settings.max_binary_array_size = settings.format_binary_max_array_size;
    format_settings.binary.max_binary_string_size = settings.format_binary_max_string_size;
    format_settings.binary.max_binary_array_size = settings.format_binary_max_array_size;
    format_settings.binary.encode_types_in_binary_format = settings.output_format_binary_encode_types_in_binary_format;
    format_settings.binary.decode_types_in_binary_format = settings.input_format_binary_decode_types_in_binary_format;
    format_settings.native.allow_types_conversion = settings.input_format_native_allow_types_conversion;
    format_settings.native.encode_types_in_binary_format = settings.output_format_native_encode_types_in_binary_format;
    format_settings.native.decode_types_in_binary_format = settings.input_format_native_decode_types_in_binary_format;
    format_settings.max_parser_depth = context->getSettingsRef().max_parser_depth;
    format_settings.client_protocol_version = context->getClientProtocolVersion();
    format_settings.date_time_overflow_behavior = settings.date_time_overflow_behavior;
@ -106,8 +106,6 @@ struct FormatSettings
    UInt64 input_allow_errors_num = 0;
    Float32 input_allow_errors_ratio = 0;

    UInt64 max_binary_string_size = 1_GiB;
    UInt64 max_binary_array_size = 1_GiB;
    UInt64 client_protocol_version = 0;

    UInt64 max_parser_depth = DBMS_DEFAULT_MAX_PARSER_DEPTH;
@ -121,6 +119,14 @@ struct FormatSettings
        ZSTD
    };

    struct
    {
        UInt64 max_binary_string_size = 1_GiB;
        UInt64 max_binary_array_size = 1_GiB;
        bool encode_types_in_binary_format = false;
        bool decode_types_in_binary_format = false;
    } binary{};

    struct
    {
        UInt64 row_group_size = 1000000;
@ -403,6 +409,7 @@ struct FormatSettings
        bool use_fast_decoder = true;
        bool filter_push_down = true;
        UInt64 output_row_index_stride = 10'000;
        bool read_use_writer_time_zone = false;
    } orc{};

    /// For capnProto format we should determine how to
@ -458,6 +465,8 @@ struct FormatSettings
    struct
    {
        bool allow_types_conversion = true;
        bool encode_types_in_binary_format = false;
        bool decode_types_in_binary_format = false;
    } native{};

    struct
@ -6,6 +6,7 @@
#include <Compression/CompressedReadBufferFromFile.h>

#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypesBinaryEncoding.h>
#include <Common/typeid_cast.h>
#include <base/range.h>

@ -31,8 +32,8 @@ namespace ErrorCodes
}


NativeReader::NativeReader(ReadBuffer & istr_, UInt64 server_revision_)
    : istr(istr_), server_revision(server_revision_)
NativeReader::NativeReader(ReadBuffer & istr_, UInt64 server_revision_, std::optional<FormatSettings> format_settings_)
    : istr(istr_), server_revision(server_revision_), format_settings(format_settings_)
{
}

@ -40,16 +41,12 @@ NativeReader::NativeReader(
    ReadBuffer & istr_,
    const Block & header_,
    UInt64 server_revision_,
    bool skip_unknown_columns_,
    bool null_as_default_,
    bool allow_types_conversion_,
    std::optional<FormatSettings> format_settings_,
    BlockMissingValues * block_missing_values_)
    : istr(istr_)
    , header(header_)
    , server_revision(server_revision_)
    , skip_unknown_columns(skip_unknown_columns_)
    , null_as_default(null_as_default_)
    , allow_types_conversion(allow_types_conversion_)
    , format_settings(std::move(format_settings_))
    , block_missing_values(block_missing_values_)
{
}
@ -83,13 +80,14 @@ void NativeReader::resetParser()
    use_index = false;
}

void NativeReader::readData(const ISerialization & serialization, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint)
static void readData(const ISerialization & serialization, ColumnPtr & column, ReadBuffer & istr, const std::optional<FormatSettings> & format_settings, size_t rows, double avg_value_size_hint)
{
    ISerialization::DeserializeBinaryBulkSettings settings;
    settings.getter = [&](ISerialization::SubstreamPath) -> ReadBuffer * { return &istr; };
    settings.avg_value_size_hint = avg_value_size_hint;
    settings.position_independent_encoding = false;
    settings.native_format = true;
    settings.data_types_binary_encoding = format_settings && format_settings->native.decode_types_in_binary_format;

    ISerialization::DeserializeBinaryBulkStatePtr state;

@ -167,8 +165,16 @@ Block NativeReader::read()

        /// Type
        String type_name;
        readBinary(type_name, istr);
        column.type = data_type_factory.get(type_name);
        if (format_settings && format_settings->native.decode_types_in_binary_format)
        {
            column.type = decodeDataType(istr);
            type_name = column.type->getName();
        }
        else
        {
            readBinary(type_name, istr);
            column.type = data_type_factory.get(type_name);
        }

        setVersionToAggregateFunctions(column.type, true, server_revision);

@ -203,7 +209,7 @@ Block NativeReader::read()

        double avg_value_size_hint = avg_value_size_hints.empty() ? 0 : avg_value_size_hints[i];
        if (rows)    /// If no rows, nothing to read.
            readData(*serialization, read_column, istr, rows, avg_value_size_hint);
            readData(*serialization, read_column, istr, format_settings, rows, avg_value_size_hint);

        column.column = std::move(read_column);

@ -214,12 +220,12 @@ Block NativeReader::read()
        {
            auto & header_column = header.getByName(column.name);

            if (null_as_default)
            if (format_settings && format_settings->null_as_default)
                insertNullAsDefaultIfNeeded(column, header_column, header.getPositionByName(column.name), block_missing_values);

            if (!header_column.type->equals(*column.type))
            {
                if (allow_types_conversion)
                if (format_settings && format_settings->native.allow_types_conversion)
                {
                    try
                    {
@ -246,7 +252,7 @@ Block NativeReader::read()
        }
        else
        {
            if (!skip_unknown_columns)
            if (format_settings && !format_settings->skip_unknown_fields)
                throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown column with name {} found while reading data in Native format", column.name);
            use_in_result = false;
        }
@ -20,7 +20,7 @@ class NativeReader
{
public:
    /// If a non-zero server_revision is specified, additional block information may be expected and read.
    NativeReader(ReadBuffer & istr_, UInt64 server_revision_);
    NativeReader(ReadBuffer & istr_, UInt64 server_revision_, std::optional<FormatSettings> format_settings_ = std::nullopt);

    /// For cases when data structure (header) is known in advance.
    /// NOTE We may use header for data validation and/or type conversions. It is not implemented.
@ -28,9 +28,7 @@ public:
        ReadBuffer & istr_,
        const Block & header_,
        UInt64 server_revision_,
        bool skip_unknown_columns_ = false,
        bool null_as_default_ = false,
        bool allow_types_conversion_ = false,
        std::optional<FormatSettings> format_settings_ = std::nullopt,
        BlockMissingValues * block_missing_values_ = nullptr);

    /// For cases when we have an index. It allows to skip columns. Only columns specified in the index will be read.
@ -38,8 +36,6 @@ public:
        IndexForNativeFormat::Blocks::const_iterator index_block_it_,
        IndexForNativeFormat::Blocks::const_iterator index_block_end_);

    static void readData(const ISerialization & serialization, ColumnPtr & column, ReadBuffer & istr, size_t rows, double avg_value_size_hint);

    Block getHeader() const;

    void resetParser();
@ -50,9 +46,7 @@ private:
    ReadBuffer & istr;
    Block header;
    UInt64 server_revision;
    bool skip_unknown_columns = false;
    bool null_as_default = false;
    bool allow_types_conversion = false;
    std::optional<FormatSettings> format_settings = std::nullopt;
    BlockMissingValues * block_missing_values = nullptr;

    bool use_index = false;
@ -14,6 +14,7 @@
#include <Columns/ColumnSparse.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeAggregateFunction.h>
#include <DataTypes/DataTypesBinaryEncoding.h>

namespace DB
{
@ -25,10 +26,20 @@ namespace ErrorCodes


NativeWriter::NativeWriter(
    WriteBuffer & ostr_, UInt64 client_revision_, const Block & header_, bool remove_low_cardinality_,
    IndexForNativeFormat * index_, size_t initial_size_of_file_)
    : ostr(ostr_), client_revision(client_revision_), header(header_),
    index(index_), initial_size_of_file(initial_size_of_file_), remove_low_cardinality(remove_low_cardinality_)
    WriteBuffer & ostr_,
    UInt64 client_revision_,
    const Block & header_,
    std::optional<FormatSettings> format_settings_,
    bool remove_low_cardinality_,
    IndexForNativeFormat * index_,
    size_t initial_size_of_file_)
    : ostr(ostr_)
    , client_revision(client_revision_)
    , header(header_)
    , index(index_)
    , initial_size_of_file(initial_size_of_file_)
    , remove_low_cardinality(remove_low_cardinality_)
    , format_settings(std::move(format_settings_))
{
    if (index)
    {
@ -45,7 +56,7 @@ void NativeWriter::flush()
}


static void writeData(const ISerialization & serialization, const ColumnPtr & column, WriteBuffer & ostr, UInt64 offset, UInt64 limit)
static void writeData(const ISerialization & serialization, const ColumnPtr & column, WriteBuffer & ostr, const std::optional<FormatSettings> & format_settings, UInt64 offset, UInt64 limit)
{
    /** If there are columns-constants - then we materialize them.
      * (Since the data type does not know how to serialize / deserialize constants.)
@ -57,6 +68,7 @@ static void writeData(const ISerialization & serialization, const ColumnPtr & co
    settings.getter = [&ostr](ISerialization::SubstreamPath) -> WriteBuffer * { return &ostr; };
    settings.position_independent_encoding = false;
    settings.low_cardinality_max_dictionary_size = 0;
    settings.data_types_binary_encoding = format_settings && format_settings->native.encode_types_in_binary_format;

    ISerialization::SerializeBinaryBulkStatePtr state;
    serialization.serializeBinaryBulkStatePrefix(*full_column, settings, state);
@ -121,15 +133,22 @@ size_t NativeWriter::write(const Block & block)
        setVersionToAggregateFunctions(column.type, include_version, include_version ? std::optional<size_t>(client_revision) : std::nullopt);

        /// Type
        String type_name = column.type->getName();
        if (format_settings && format_settings->native.encode_types_in_binary_format)
        {
            encodeDataType(column.type, ostr);
        }
        else
        {
            String type_name = column.type->getName();

        /// For compatibility, we will not send explicit timezone parameter in DateTime data type
        /// to older clients, that cannot understand it.
        if (client_revision < DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE
            && startsWith(type_name, "DateTime("))
            type_name = "DateTime";
            /// For compatibility, we will not send explicit timezone parameter in DateTime data type
            /// to older clients, that cannot understand it.
            if (client_revision < DBMS_MIN_REVISION_WITH_TIME_ZONE_PARAMETER_IN_DATETIME_DATA_TYPE
                && startsWith(type_name, "DateTime("))
                type_name = "DateTime";

        writeStringBinary(type_name, ostr);
            writeStringBinary(type_name, ostr);
        }

        /// Serialization. Dynamic, if client supports it.
        SerializationPtr serialization;
@ -161,7 +180,7 @@ size_t NativeWriter::write(const Block & block)

        /// Data
        if (rows)    /// Zero items of data is always represented as zero number of bytes.
            writeData(*serialization, column.column, ostr, 0, 0);
            writeData(*serialization, column.column, ostr, format_settings, 0, 0);

        if (index)
        {

@ -3,6 +3,7 @@
#include <base/types.h>
#include <DataTypes/IDataType.h>
#include <Core/Block.h>
#include <Formats/FormatSettings.h>

namespace DB
{
@ -23,7 +24,7 @@ public:
    /** If non-zero client_revision is specified, additional block information can be written.
      */
    NativeWriter(
        WriteBuffer & ostr_, UInt64 client_revision_, const Block & header_, bool remove_low_cardinality_ = false,
        WriteBuffer & ostr_, UInt64 client_revision_, const Block & header_, std::optional<FormatSettings> format_settings_ = std::nullopt, bool remove_low_cardinality_ = false,
        IndexForNativeFormat * index_ = nullptr, size_t initial_size_of_file_ = 0);

    Block getHeader() const { return header; }
@ -44,6 +45,7 @@ private:
    CompressedWriteBuffer * ostr_concrete = nullptr;

    bool remove_low_cardinality;
    std::optional<FormatSettings> format_settings;
};

}
@ -284,7 +284,12 @@ public:
    {
        while (x)
        {
            result_array_values_data.push_back(std::countr_zero(x));
            /// In C++20, char8_t is not an unsigned integral type anymore (https://godbolt.org/z/Mqcb7qn58),
            /// and thus you cannot use std::countr_zero on it.
            if constexpr (std::is_same_v<UnsignedType, UInt8>)
                result_array_values_data.push_back(std::countr_zero(static_cast<unsigned char>(x)));
            else
                result_array_values_data.push_back(std::countr_zero(x));
            x &= (x - 1);
        }
    }
@ -336,4 +341,3 @@ REGISTER_FUNCTION(BitToArray)
}

}
@ -1,9 +1,12 @@
#pragma once

#include <base/types.h>
#include <Columns/ColumnString.h>
#include <Common/OptimizedRegularExpression.h>
#include <Common/re2.h>
#include <Functions/Regexps.h>
#include <Functions/ReplaceStringImpl.h>
#include <IO/WriteHelpers.h>
#include <base/types.h>

namespace DB
{
@ -48,45 +51,75 @@ struct ReplaceRegexpImpl

    static constexpr int max_captures = 10;

    static Instructions createInstructions(std::string_view replacement, int num_captures)
    /// References in the replacement string must not point to non-existing capturing groups.
    static void checkSubstitutions(std::string_view replacement, int num_captures)
    {
        Instructions instructions;

        String literals;
        for (size_t i = 0; i < replacement.size(); ++i)
        {
            if (replacement[i] == '\\' && i + 1 < replacement.size())
            {
                if (isNumericASCII(replacement[i + 1])) /// Substitution
                if (isNumericASCII(replacement[i + 1])) /// substitution
                {
                    int substitution_num = replacement[i + 1] - '0';
                    if (substitution_num >= num_captures)
                        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Substitution '\\{}' in replacement argument is invalid, regexp has only {} capturing groups", substitution_num, num_captures - 1);
                }
            }
        }
    }

    static Instructions createInstructions(std::string_view replacement, int num_captures)
    {
        checkSubstitutions(replacement, num_captures);

        Instructions instructions;

        String literals;
        literals.reserve(replacement.size());

        for (size_t i = 0; i < replacement.size(); ++i)
        {
            if (replacement[i] == '\\' && i + 1 < replacement.size())
            {
                if (isNumericASCII(replacement[i + 1])) /// substitution
                {
                    if (!literals.empty())
                    {
                        instructions.emplace_back(literals);
                        literals = "";
                    }
                    instructions.emplace_back(replacement[i + 1] - '0');
                    int substitution_num = replacement[i + 1] - '0';
                    instructions.emplace_back(substitution_num);
                }
                else
                    literals += replacement[i + 1]; /// Escaping
                    literals += replacement[i + 1]; /// escaping
                ++i;
            }
            else
                literals += replacement[i]; /// Plain character
                literals += replacement[i]; /// plain character
        }

        if (!literals.empty())
            instructions.emplace_back(literals);

        for (const auto & instr : instructions)
            if (instr.substitution_num >= num_captures)
                throw Exception(
                    ErrorCodes::BAD_ARGUMENTS,
                    "Id {} in replacement string is an invalid substitution, regexp has only {} capturing groups",
                    instr.substitution_num, num_captures - 1);

        return instructions;
    }

    static bool canFallbackToStringReplacement(const String & needle, const String & replacement, const re2::RE2 & searcher, int num_captures)
    {
        if (searcher.NumberOfCapturingGroups())
            return false;

        checkSubstitutions(replacement, num_captures);

        String required_substring;
        bool is_trivial;
        bool required_substring_is_prefix;
        std::vector<String> alternatives;
        OptimizedRegularExpression::analyze(needle, required_substring, is_trivial, required_substring_is_prefix, alternatives);
        return is_trivial && required_substring_is_prefix && required_substring == needle;
    }

    static void processString(
        const char * haystack_data,
        size_t haystack_length,
@ -124,7 +157,7 @@ struct ReplaceRegexpImpl
        {
            std::string_view replacement;
            if (instr.substitution_num >= 0)
                replacement = std::string_view(matches[instr.substitution_num].data(), matches[instr.substitution_num].size());
                replacement = {matches[instr.substitution_num].data(), matches[instr.substitution_num].size()};
            else
                replacement = instr.literal;
            res_data.resize(res_data.size() + replacement.size());
@ -179,19 +212,32 @@ struct ReplaceRegexpImpl
        res_offsets.resize(haystack_size);

        re2::RE2::Options regexp_options;
        /// Don't write error messages to stderr.
        regexp_options.set_log_errors(false);
        regexp_options.set_log_errors(false); /// don't write error messages to stderr

        re2::RE2 searcher(needle, regexp_options);

        if (!searcher.ok())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "The pattern argument is not a valid re2 pattern: {}", searcher.error());

        int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures);

        /// Try to use non-regexp string replacement. This shortcut is implemented only for const needles + const replacements, as
        /// pattern analysis incurs some cost too.
        if (canFallbackToStringReplacement(needle, replacement, searcher, num_captures))
        {
            auto convertTrait = [](ReplaceRegexpTraits::Replace first_or_all)
            {
                switch (first_or_all)
                {
                    case ReplaceRegexpTraits::Replace::First: return ReplaceStringTraits::Replace::First;
                    case ReplaceRegexpTraits::Replace::All: return ReplaceStringTraits::Replace::All;
                }
            };
            ReplaceStringImpl<Name, convertTrait(replace)>::vectorConstantConstant(haystack_data, haystack_offsets, needle, replacement, res_data, res_offsets);
            return;
        }

        Instructions instructions = createInstructions(replacement, num_captures);

        /// Cannot perform search for whole columns. Will process each string separately.
        for (size_t i = 0; i < haystack_size; ++i)
        {
            size_t from = i > 0 ? haystack_offsets[i - 1] : 0;
@ -221,10 +267,8 @@ struct ReplaceRegexpImpl
        res_offsets.resize(haystack_size);

        re2::RE2::Options regexp_options;
        /// Don't write error messages to stderr.
        regexp_options.set_log_errors(false);
        regexp_options.set_log_errors(false); /// don't write error messages to stderr

        /// Cannot perform search for whole columns. Will process each string separately.
        for (size_t i = 0; i < haystack_size; ++i)
        {
            size_t hs_from = i > 0 ? haystack_offsets[i - 1] : 0;
@ -242,6 +286,7 @@ struct ReplaceRegexpImpl
            re2::RE2 searcher(needle, regexp_options);
            if (!searcher.ok())
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "The pattern argument is not a valid re2 pattern: {}", searcher.error());

            int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures);
            Instructions instructions = createInstructions(replacement, num_captures);

@ -270,17 +315,14 @@ struct ReplaceRegexpImpl
        res_offsets.resize(haystack_size);

        re2::RE2::Options regexp_options;
        /// Don't write error messages to stderr.
        regexp_options.set_log_errors(false);
        regexp_options.set_log_errors(false); /// don't write error messages to stderr

        re2::RE2 searcher(needle, regexp_options);

        if (!searcher.ok())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "The pattern argument is not a valid re2 pattern: {}", searcher.error());

        int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures);

        /// Cannot perform search for whole columns. Will process each string separately.
        for (size_t i = 0; i < haystack_size; ++i)
        {
            size_t hs_from = i > 0 ? haystack_offsets[i - 1] : 0;
@ -290,8 +332,9 @@ struct ReplaceRegexpImpl
            size_t repl_from = i > 0 ? replacement_offsets[i - 1] : 0;
            const char * repl_data = reinterpret_cast<const char *>(replacement_data.data() + repl_from);
            const size_t repl_length = static_cast<unsigned>(replacement_offsets[i] - repl_from - 1);
            std::string_view replacement(repl_data, repl_length);

            Instructions instructions = createInstructions(std::string_view(repl_data, repl_length), num_captures);
            Instructions instructions = createInstructions(replacement, num_captures);

            processString(hs_data, hs_length, res_data, res_offset, searcher, num_captures, instructions);
            res_offsets[i] = res_offset;
@ -317,10 +360,8 @@ struct ReplaceRegexpImpl
        res_offsets.resize(haystack_size);

        re2::RE2::Options regexp_options;
        /// Don't write error messages to stderr.
        regexp_options.set_log_errors(false);
        regexp_options.set_log_errors(false); /// don't write error messages to stderr

        /// Cannot perform search for whole columns. Will process each string separately.
        for (size_t i = 0; i < haystack_size; ++i)
        {
            size_t hs_from = i > 0 ? haystack_offsets[i - 1] : 0;
@ -338,12 +379,14 @@ struct ReplaceRegexpImpl
            size_t repl_from = i > 0 ? replacement_offsets[i - 1] : 0;
            const char * repl_data = reinterpret_cast<const char *>(replacement_data.data() + repl_from);
            const size_t repl_length = static_cast<unsigned>(replacement_offsets[i] - repl_from - 1);
            std::string_view replacement(repl_data, repl_length);

            re2::RE2 searcher(needle, regexp_options);
            if (!searcher.ok())
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "The pattern argument is not a valid re2 pattern: {}", searcher.error());

            int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures);
            Instructions instructions = createInstructions(std::string_view(repl_data, repl_length), num_captures);
            Instructions instructions = createInstructions(replacement, num_captures);

            processString(hs_data, hs_length, res_data, res_offset, searcher, num_captures, instructions);
            res_offsets[i] = res_offset;
@ -367,16 +410,13 @@ struct ReplaceRegexpImpl
        res_offsets.resize(haystack_size);

        re2::RE2::Options regexp_options;
        /// Don't write error messages to stderr.
        regexp_options.set_log_errors(false);
        regexp_options.set_log_errors(false); /// don't write error messages to stderr

        re2::RE2 searcher(needle, regexp_options);

        if (!searcher.ok())
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "The pattern argument is not a valid re2 pattern: {}", searcher.error());

        int num_captures = std::min(searcher.NumberOfCapturingGroups() + 1, max_captures);

        Instructions instructions = createInstructions(replacement, num_captures);

        for (size_t i = 0; i < haystack_size; ++i)
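A standalone sketch of the validation that checkSubstitutions introduces above, without the re2 and Exception machinery; the escape handling mirrors the loop in the diff.

#include <stdexcept>
#include <string_view>

void checkSubstitutionsSketch(std::string_view replacement, int num_captures)
{
    for (size_t i = 0; i < replacement.size(); ++i)
    {
        if (replacement[i] == '\\' && i + 1 < replacement.size())
        {
            char next = replacement[i + 1];
            if (next >= '0' && next <= '9' && next - '0' >= num_captures)
                throw std::invalid_argument("replacement references a non-existing capturing group");
            ++i; /// skip the escaped character, digit or not
        }
    }
}

/// With the pattern "(\\d+)" there are two usable references, \0 and \1,
/// so checkSubstitutionsSketch("\\2", 2) throws.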
@ -103,27 +103,40 @@ private:
            sorted_labels[i].label = label;
        }

        /// Stable sort is required for labels to apply in the same order if scores are equal
        std::stable_sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });
        /// Sort scores in descending order to traverse the ROC curve from left to right
        std::sort(sorted_labels.begin(), sorted_labels.end(), [](const auto & lhs, const auto & rhs) { return lhs.score > rhs.score; });

        /// We will first calculate the non-normalized area.

        size_t area = 0;
        size_t count_positive = 0;
        Float64 area = 0.0;
        Float64 prev_score = sorted_labels[0].score;
        size_t prev_fp = 0, prev_tp = 0;
        size_t curr_fp = 0, curr_tp = 0;
        for (size_t i = 0; i < size; ++i)
        {
            // Only increment the area when the score changes
            if (sorted_labels[i].score != prev_score)
            {
                area += (curr_fp - prev_fp) * (curr_tp + prev_tp) / 2.0; // Trapezoidal area under the curve (might degenerate to zero or to a rectangle)
                prev_fp = curr_fp;
                prev_tp = curr_tp;
                prev_score = sorted_labels[i].score;
            }

            if (sorted_labels[i].label)
                ++count_positive; /// The curve moves one step up. No area increase.
                curr_tp += 1; /// The curve moves one step up.
            else
                area += count_positive; /// The curve moves one step right. Area is increased by 1 * height = count_positive.
                curr_fp += 1; /// The curve moves one step right.
        }

        /// Then divide the area by the area of the rectangle.
        area += (curr_fp - prev_fp) * (curr_tp + prev_tp) / 2.0;

        if (count_positive == 0 || count_positive == size)
        /// Then normalize it by dividing by the area of the rectangle.

        if (curr_tp == 0 || curr_tp == size)
            return std::numeric_limits<Float64>::quiet_NaN();

        return static_cast<Float64>(area) / count_positive / (size - count_positive);
        return area / curr_tp / (size - curr_tp);
    }

    static void vector(
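A self-contained sketch of the trapezoid rule adopted above. With tied scores the ROC curve moves diagonally; the trapezoid term fp_delta * (tp_prev + tp_curr) / 2 credits half of the enclosed cell, which the old step-counting version missed.

#include <algorithm>
#include <cstdio>
#include <limits>
#include <utility>
#include <vector>

double aucSketch(std::vector<std::pair<double, bool>> scored) /// {score, label}
{
    std::sort(scored.begin(), scored.end(), [](const auto & l, const auto & r) { return l.first > r.first; });
    double area = 0.0;
    double prev_score = scored[0].first;
    size_t prev_fp = 0, prev_tp = 0, fp = 0, tp = 0;
    for (const auto & [score, label] : scored)
    {
        if (score != prev_score) /// close the trapezoid only when the score changes
        {
            area += (fp - prev_fp) * (tp + prev_tp) / 2.0;
            prev_fp = fp;
            prev_tp = tp;
            prev_score = score;
        }
        label ? ++tp : ++fp;
    }
    area += (fp - prev_fp) * (tp + prev_tp) / 2.0;
    if (tp == 0 || tp == scored.size())
        return std::numeric_limits<double>::quiet_NaN();
    return area / tp / (scored.size() - tp);
}

int main()
{
    /// One positive and one negative tied at 0.5 contribute half a cell: prints 0.875.
    printf("%.3f\n", aucSketch({{0.9, true}, {0.5, true}, {0.5, false}, {0.1, false}}));
}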
@ -5,7 +5,17 @@ namespace DB

REGISTER_FUNCTION(Tuple)
{
    factory.registerFunction<FunctionTuple>();
    factory.registerFunction<FunctionTuple>(FunctionDocumentation{
        .description = R"(
Returns a tuple by grouping input arguments.

For columns C1, C2, ... with the types T1, T2, ..., it returns a named Tuple(C1 T1, C2 T2, ...) type tuple containing these columns if their names are unique and can be treated as unquoted identifiers, otherwise a Tuple(T1, T2, ...) is returned. There is no cost to execute the function.
Tuples are normally used as intermediate values for an argument of IN operators, or for creating a list of formal parameters of lambda functions. Tuples can’t be written to a table.

The function implements the operator `(x, y, ...)`.
)",
        .examples{{"typical", "SELECT tuple(1, 2)", "(1,2)"}},
        .categories{"Miscellaneous"}});
}

}
@ -6,20 +6,28 @@
#include <DataTypes/DataTypeTuple.h>
#include <Functions/FunctionFactory.h>
#include <Interpreters/Context.h>
#include <Parsers/isUnquotedIdentifier.h>

namespace DB
{

/** tuple(x, y, ...) is a function that allows you to group several columns
/** tuple(x, y, ...) is a function that allows you to group several columns.
  * tupleElement(tuple, n) is a function that allows you to retrieve a column from tuple.
  */
class FunctionTuple : public IFunction
{
    bool enable_named_columns;

public:
    static constexpr auto name = "tuple";

    /// maybe_unused: false positive
    [[ maybe_unused ]] static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionTuple>(); }
    [[maybe_unused]] static FunctionPtr create(ContextPtr context)
    {
        return std::make_shared<FunctionTuple>(context->getSettingsRef().enable_named_columns_in_function_tuple);
    }

    explicit FunctionTuple(bool enable_named_columns_ = false) : enable_named_columns(enable_named_columns_) { }

    String getName() const override { return name; }

@ -38,9 +46,26 @@ public:
    bool useDefaultImplementationForConstants() const override { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        return std::make_shared<DataTypeTuple>(arguments);
        if (arguments.empty())
            return std::make_shared<DataTypeTuple>(DataTypes{});

        DataTypes types;
        Names names;
        NameSet name_set;
        for (const auto & argument : arguments)
        {
            types.emplace_back(argument.type);
            names.emplace_back(argument.name);
            name_set.emplace(argument.name);
        }

        if (enable_named_columns && name_set.size() == names.size()
            && std::all_of(names.cbegin(), names.cend(), [](const auto & n) { return isUnquotedIdentifier(n); }))
            return std::make_shared<DataTypeTuple>(types, names);
        else
            return std::make_shared<DataTypeTuple>(types);
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
@ -53,9 +78,9 @@ public:
        for (size_t i = 0; i < tuple_size; ++i)
        {
            /** If the tuple mixes constant and non-constant columns,
              * convert all to non-constant columns,
              * because many places in code expect all non-constant columns in a non-constant tuple.
              */
            tuple_columns[i] = arguments[i].column->convertToFullColumnIfConst();
        }
        return ColumnTuple::create(tuple_columns);
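A sketch of the naming rule getReturnTypeImpl applies above: the result becomes a named tuple only when every argument name is unique and is a valid unquoted identifier. isUnquotedIdentifierSketch below is a simplified stand-in for the parser helper the diff includes, not the real implementation.

#include <cctype>
#include <string>
#include <unordered_set>
#include <vector>

bool isUnquotedIdentifierSketch(const std::string & s)
{
    if (s.empty() || (!std::isalpha(static_cast<unsigned char>(s[0])) && s[0] != '_'))
        return false;
    for (char c : s)
        if (!std::isalnum(static_cast<unsigned char>(c)) && c != '_')
            return false;
    return true;
}

bool shouldProduceNamedTuple(const std::vector<std::string> & names)
{
    std::unordered_set<std::string> unique(names.begin(), names.end());
    if (unique.size() != names.size())
        return false; /// duplicate names -> plain Tuple(T1, T2, ...)
    for (const auto & name : names)
        if (!isUnquotedIdentifierSketch(name))
            return false; /// e.g. "plus(1, 1)" is not an identifier
    return true;
}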
118
src/Functions/tupleNames.cpp
Normal file
@ -0,0 +1,118 @@
#include <Columns/ColumnArray.h>
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
#include <Functions/FunctionFactory.h>

namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

namespace
{

/** Transform a named tuple into names, which is a constant array of strings.
  */
class ExecutableFunctionTupleNames : public IExecutableFunction
{
public:
    static constexpr auto name = "tupleNames";

    explicit ExecutableFunctionTupleNames(Array name_fields_) : name_fields(std::move(name_fields_)) { }

    String getName() const override { return name; }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName &, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
        return result_type->createColumnConst(input_rows_count, name_fields);
    }

private:
    Array name_fields;
};

class FunctionBaseTupleNames : public IFunctionBase
{
public:
    static constexpr auto name = "tupleNames";

    explicit FunctionBaseTupleNames(DataTypePtr argument_type, DataTypePtr result_type_, Array name_fields_)
        : argument_types({std::move(argument_type)}), result_type(std::move(result_type_)), name_fields(std::move(name_fields_))
    {
    }

    String getName() const override { return name; }

    bool isSuitableForConstantFolding() const override { return true; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    const DataTypes & getArgumentTypes() const override { return argument_types; }

    const DataTypePtr & getResultType() const override { return result_type; }

    ExecutableFunctionPtr prepare(const ColumnsWithTypeAndName &) const override
    {
        return std::make_unique<ExecutableFunctionTupleNames>(name_fields);
    }

private:
    DataTypes argument_types;
    DataTypePtr result_type;
    Array name_fields;
};

class TupleNamesOverloadResolver : public IFunctionOverloadResolver
{
public:
    static constexpr auto name = "tupleNames";

    static FunctionOverloadResolverPtr create(ContextPtr) { return std::make_unique<TupleNamesOverloadResolver>(); }

    String getName() const override { return name; }

    size_t getNumberOfArguments() const override { return 1; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        const DataTypeTuple * tuple = checkAndGetDataType<DataTypeTuple>(arguments[0].type.get());

        if (!tuple)
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a tuple", getName());

        return std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>());
    }

    FunctionBasePtr buildImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type) const override
    {
        const DataTypeTuple * tuple = checkAndGetDataType<DataTypeTuple>(arguments[0].type.get());

        if (!tuple)
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First argument for function {} must be a tuple", getName());

        DataTypes types = tuple->getElements();
        Array name_fields;
        for (const auto & elem_name : tuple->getElementNames())
            name_fields.emplace_back(elem_name);

        return std::make_unique<FunctionBaseTupleNames>(arguments[0].type, result_type, std::move(name_fields));
    }
};

}

REGISTER_FUNCTION(TupleNames)
{
    factory.registerFunction<TupleNamesOverloadResolver>(FunctionDocumentation{
        .description = R"(
Converts a tuple into an array of column names. For a tuple in the form `Tuple(a T, b T, ...)`, it returns an array of strings representing the named columns of the tuple. If the tuple elements do not have explicit names, their indices will be used as the column names instead.
)",
        .examples{{"typical", "SELECT tupleNames(tuple(1 as a, 2 as b))", "['a','b']"}},
        .categories{"Miscellaneous"}});
}

}
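A sketch of the behavior the tupleNames documentation above describes: for unnamed tuples, the element names fall back to 1-based indices, so the function returns ['1', '2', ...] in that case. This is a simplified stand-in for what DataTypeTuple::getElementNames() provides.

#include <string>
#include <vector>

std::vector<std::string> elementNamesSketch(size_t element_count, const std::vector<std::string> & explicit_names)
{
    if (!explicit_names.empty())
        return explicit_names; /// Tuple(a UInt8, b UInt8) -> {"a", "b"}
    std::vector<std::string> names;
    for (size_t i = 1; i <= element_count; ++i)
        names.push_back(std::to_string(i)); /// Tuple(UInt8, UInt8) -> {"1", "2"}
    return names;
}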
@ -713,8 +713,12 @@ ReadWriteBufferFromHTTP::HTTPFileInfo ReadWriteBufferFromHTTP::getFileInfo()
        /// fall back to slow whole-file reads when HEAD is actually supported; that sounds
        /// like a nightmare to debug.)
        if (e.getHTTPStatus() >= 400 && e.getHTTPStatus() <= 499 &&
            e.getHTTPStatus() != Poco::Net::HTTPResponse::HTTP_TOO_MANY_REQUESTS)
            e.getHTTPStatus() != Poco::Net::HTTPResponse::HTTP_TOO_MANY_REQUESTS &&
            e.getHTTPStatus() != Poco::Net::HTTPResponse::HTTP_REQUEST_TIMEOUT &&
            e.getHTTPStatus() != Poco::Net::HTTPResponse::HTTP_MISDIRECTED_REQUEST)
        {
            return HTTPFileInfo{};
        }

        throw;
    }
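A sketch of the classification the widened condition above implements: most 4xx answers to a HEAD request mean there is no usable file info, but the three transient statuses must propagate so the request is retried instead of silently degrading to whole-file reads.

bool headFailureMeansNoFileInfo(int http_status)
{
    const bool transient = http_status == 429   /// Too Many Requests
                        || http_status == 408   /// Request Timeout
                        || http_status == 421;  /// Misdirected Request
    return http_status >= 400 && http_status <= 499 && !transient;
}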
@ -59,10 +59,10 @@ clickhouse_add_executable (parse_date_time_best_effort parse_date_time_best_effort.cpp)
target_link_libraries (parse_date_time_best_effort PRIVATE clickhouse_common_io)

clickhouse_add_executable (zlib_ng_bug zlib_ng_bug.cpp)
target_link_libraries (zlib_ng_bug PRIVATE ch_contrib::zlib)
target_link_libraries (zlib_ng_bug PRIVATE ch_contrib::zlib clickhouse_common_io)

clickhouse_add_executable (dragonbox_test dragonbox_test.cpp)
target_link_libraries (dragonbox_test PRIVATE ch_contrib::dragonbox_to_chars)
target_link_libraries (dragonbox_test PRIVATE ch_contrib::dragonbox_to_chars clickhouse_common_io)

clickhouse_add_executable (zstd_buffers zstd_buffers.cpp)
target_link_libraries (zstd_buffers PRIVATE clickhouse_common_io)
@ -29,33 +29,12 @@ BlockIO InterpreterSetRoleQuery::execute()

void InterpreterSetRoleQuery::setRole(const ASTSetRoleQuery & query)
{
    auto & access_control = getContext()->getAccessControl();
    auto session_context = getContext()->getSessionContext();
    auto user = session_context->getUser();

    if (query.kind == ASTSetRoleQuery::Kind::SET_ROLE_DEFAULT)
    {
        session_context->setCurrentRolesDefault();
    }
    else
    {
        RolesOrUsersSet roles_from_query{*query.roles, access_control};
        std::vector<UUID> new_current_roles;
        if (roles_from_query.all)
        {
            new_current_roles = user->granted_roles.findGranted(roles_from_query);
        }
        else
        {
            for (const auto & id : roles_from_query.getMatchingIDs())
            {
                if (!user->granted_roles.isGranted(id))
                    throw Exception(ErrorCodes::SET_NON_GRANTED_ROLE, "Role should be granted to set current");
                new_current_roles.emplace_back(id);
            }
        }
        session_context->setCurrentRoles(new_current_roles);
    }
        session_context->setCurrentRoles(RolesOrUsersSet{*query.roles, session_context->getAccessControl()});
}

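A sketch (with stand-in types) of where the granted-role check lives after the refactor above: the interpreter no longer expands and validates role IDs itself; the session-side setter is now the single place that resolves the requested set against what was granted.

#include <algorithm>
#include <stdexcept>
#include <vector>

using RoleId = unsigned long long; /// stand-in for UUID

struct SessionSketch
{
    std::vector<RoleId> granted;
    std::vector<RoleId> current;

    void setCurrentRoles(const std::vector<RoleId> & requested)
    {
        for (RoleId id : requested)
            if (std::find(granted.begin(), granted.end(), id) == granted.end())
                throw std::runtime_error("Role should be granted to set current");
        current = requested; /// validation and assignment happen in one place
    }
};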
@ -405,10 +405,6 @@ Block createBlockForSet(

}

ScopeStack::Level::Level() = default;
ScopeStack::Level::~Level() = default;
ScopeStack::Level::Level(Level &&) noexcept = default;

FutureSetPtr makeExplicitSet(
    const ASTFunction * node, const ActionsDAG & actions, ContextPtr context, PreparedSets & prepared_sets)
{
@ -462,6 +458,7 @@ public:
        for (const auto * node : index)
            map.emplace(node->result_name, node);
    }
    ~Index() = default;

    void addNode(const ActionsDAG::Node * node)
    {
@ -502,6 +499,10 @@ public:
    }
};

ScopeStack::Level::Level() = default;
ScopeStack::Level::~Level() = default;
ScopeStack::Level::Level(Level &&) noexcept = default;

ActionsMatcher::Data::Data(
    ContextPtr context_,
    SizeLimits set_size_limit_,
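A sketch of why the defaulted special members had to move below Index in the hunk above: a defaulted destructor (or move constructor) is defined at the point of the = default, and for a member like std::unique_ptr<Index> that definition requires Index to be a complete type.

#include <memory>

struct Index; /// forward declaration is enough for the member declaration

struct Level
{
    std::unique_ptr<Index> index;
    Level();
    ~Level();
    Level(Level &&) noexcept;
};

struct Index { int value = 0; };

/// Defining these above `struct Index` would fail: ~unique_ptr needs the
/// complete type in order to call delete.
Level::Level() = default;
Level::~Level() = default;
Level::Level(Level &&) noexcept = default;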
@ -26,7 +26,6 @@
#include <Interpreters/TemporaryDataOnDisk.h>
#include <Parsers/ASTSelectQuery.h>
#include <base/sort.h>
#include <Common/CacheBase.h>
#include <Common/CurrentMetrics.h>
#include <Common/CurrentThread.h>
#include <Common/JSONBuilder.h>
@ -78,115 +77,6 @@ namespace ErrorCodes

namespace
{
/** Collects observed HashMap-s sizes to avoid redundant intermediate resizes.
  */
class HashTablesStatistics
{
public:
    struct Entry
    {
        size_t sum_of_sizes; // used to determine if it's better to convert aggregation to two-level from the beginning
        size_t median_size; // roughly the size we're going to preallocate on each thread
    };

    using Cache = DB::CacheBase<UInt64, Entry>;
    using CachePtr = std::shared_ptr<Cache>;
    using Params = DB::Aggregator::Params::StatsCollectingParams;

    /// Collection and use of the statistics should be enabled.
    std::optional<Entry> getSizeHint(const Params & params)
    {
        if (!params.isCollectionAndUseEnabled())
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Collection and use of the statistics should be enabled.");

        std::lock_guard lock(mutex);
        const auto cache = getHashTableStatsCache(params, lock);
        if (const auto hint = cache->get(params.key))
        {
            LOG_TRACE(
                getLogger("Aggregator"),
                "An entry for key={} found in cache: sum_of_sizes={}, median_size={}",
                params.key,
                hint->sum_of_sizes,
                hint->median_size);
            return *hint;
        }
        return std::nullopt;
    }

    /// Collection and use of the statistics should be enabled.
    void update(size_t sum_of_sizes, size_t median_size, const Params & params)
    {
        if (!params.isCollectionAndUseEnabled())
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Collection and use of the statistics should be enabled.");

        std::lock_guard lock(mutex);
        const auto cache = getHashTableStatsCache(params, lock);
        const auto hint = cache->get(params.key);
        // We'll maintain the maximum among all the observed values until the next prediction turns out to be too wrong.
        if (!hint || sum_of_sizes < hint->sum_of_sizes / 2 || hint->sum_of_sizes < sum_of_sizes || median_size < hint->median_size / 2
            || hint->median_size < median_size)
        {
            LOG_TRACE(
                getLogger("Aggregator"),
                "Statistics updated for key={}: new sum_of_sizes={}, median_size={}",
                params.key,
                sum_of_sizes,
                median_size);
            cache->set(params.key, std::make_shared<Entry>(Entry{.sum_of_sizes = sum_of_sizes, .median_size = median_size}));
        }
    }

    std::optional<DB::HashTablesCacheStatistics> getCacheStats() const
    {
        std::lock_guard lock(mutex);
        if (hash_table_stats)
        {
            size_t hits = 0, misses = 0;
            hash_table_stats->getStats(hits, misses);
            return DB::HashTablesCacheStatistics{.entries = hash_table_stats->count(), .hits = hits, .misses = misses};
        }
        return std::nullopt;
    }

    static size_t calculateCacheKey(const DB::ASTPtr & select_query)
    {
        if (!select_query)
            throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Query ptr cannot be null");

        const auto & select = select_query->as<DB::ASTSelectQuery &>();

        // It may happen in some corner cases like `select 1 as num group by num`.
        if (!select.tables())
            return 0;

        SipHash hash;
        hash.update(select.tables()->getTreeHash(/*ignore_aliases=*/ true));
        if (const auto where = select.where())
            hash.update(where->getTreeHash(/*ignore_aliases=*/ true));
        if (const auto group_by = select.groupBy())
            hash.update(group_by->getTreeHash(/*ignore_aliases=*/ true));
        return hash.get64();
    }

private:
    CachePtr getHashTableStatsCache(const Params & params, const std::lock_guard<std::mutex> &)
    {
        if (!hash_table_stats || hash_table_stats->maxSizeInBytes() != params.max_entries_for_hash_table_stats)
            hash_table_stats = std::make_shared<Cache>(params.max_entries_for_hash_table_stats);
        return hash_table_stats;
    }

    mutable std::mutex mutex;
    CachePtr hash_table_stats;
};

HashTablesStatistics & getHashTablesStatistics()
{
    static HashTablesStatistics hash_tables_stats;
    return hash_tables_stats;
}

bool worthConvertToTwoLevel(
    size_t group_by_two_level_threshold, size_t result_size, size_t group_by_two_level_threshold_bytes, auto result_size_bytes)
{
@ -215,49 +105,29 @@ void initDataVariantsWithSizeHint(
    DB::AggregatedDataVariants & result, DB::AggregatedDataVariants::Type method_chosen, const DB::Aggregator::Params & params)
{
    const auto & stats_collecting_params = params.stats_collecting_params;
    if (stats_collecting_params.isCollectionAndUseEnabled())
    const auto max_threads = params.group_by_two_level_threshold != 0 ? std::max(params.max_threads, 1ul) : 1;
    if (auto hint = getSizeHint(stats_collecting_params, /*tables_cnt=*/max_threads))
    {
        if (auto hint = getHashTablesStatistics().getSizeHint(stats_collecting_params))
        {
            const auto max_threads = params.group_by_two_level_threshold != 0 ? std::max(params.max_threads, 1ul) : 1;
            const auto lower_limit = hint->sum_of_sizes / max_threads;
            const auto upper_limit = stats_collecting_params.max_size_to_preallocate_for_aggregation / max_threads;
            if (hint->median_size > upper_limit)
            {
                /// Since we cannot afford to preallocate as much as we want, we will likely need to do a resize anyway.
                /// But we will also work with the big (i.e. not so cache friendly) HT from the beginning, which may result in a slight slowdown.
                /// So let's just do nothing.
                LOG_TRACE(
                    getLogger("Aggregator"),
                    "No space was preallocated in hash tables because 'max_size_to_preallocate_for_aggregation' has too small a value: {}, "
                    "should be at least {}",
                    stats_collecting_params.max_size_to_preallocate_for_aggregation,
                    hint->median_size * max_threads);
            }
            /// https://github.com/ClickHouse/ClickHouse/issues/44402#issuecomment-1359920703
            else if ((max_threads > 1 && hint->sum_of_sizes > 100'000) || hint->sum_of_sizes > 500'000)
            {
                const auto adjusted = std::max(lower_limit, hint->median_size);
                if (worthConvertToTwoLevel(
                        params.group_by_two_level_threshold,
                        hint->sum_of_sizes,
                        /*group_by_two_level_threshold_bytes*/ 0,
                        /*result_size_bytes*/ 0))
                    method_chosen = convertToTwoLevelTypeIfPossible(method_chosen);
                result.init(method_chosen, adjusted);
                ProfileEvents::increment(ProfileEvents::AggregationHashTablesInitializedAsTwoLevel, result.isTwoLevel());
                return;
            }
        }
        if (worthConvertToTwoLevel(
                params.group_by_two_level_threshold,
                hint->sum_of_sizes,
                /*group_by_two_level_threshold_bytes*/ 0,
                /*result_size_bytes*/ 0))
            method_chosen = convertToTwoLevelTypeIfPossible(method_chosen);
        result.init(method_chosen, hint->median_size);
    }
    result.init(method_chosen);
    else
    {
        result.init(method_chosen);
    }
    ProfileEvents::increment(ProfileEvents::AggregationHashTablesInitializedAsTwoLevel, result.isTwoLevel());
}

/// Collection and use of the statistics should be enabled.
void updateStatistics(const DB::ManyAggregatedDataVariants & data_variants, const DB::Aggregator::Params::StatsCollectingParams & params)
void updateStatistics(const DB::ManyAggregatedDataVariants & data_variants, const DB::StatsCollectingParams & params)
{
    if (!params.isCollectionAndUseEnabled())
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Collection and use of the statistics should be enabled.");
        return;

    std::vector<size_t> sizes(data_variants.size());
    for (size_t i = 0; i < data_variants.size(); ++i)
@ -265,7 +135,7 @@ void updateStatistics(const DB::ManyAggregatedDataVariants & data_variants, const DB::Aggregator::Params::StatsCollectingParams & params)
    const auto median_size = sizes.begin() + sizes.size() / 2; // not precisely though...
    std::nth_element(sizes.begin(), median_size, sizes.end());
    const auto sum_of_sizes = std::accumulate(sizes.begin(), sizes.end(), 0ull);
    getHashTablesStatistics().update(sum_of_sizes, *median_size, params);
    DB::getHashTablesStatistics().update(sum_of_sizes, *median_size, params);
}

DB::ColumnNumbers calculateKeysPositions(const DB::Block & header, const DB::Aggregator::Params & params)
@ -300,24 +170,6 @@ size_t getMinBytesForPrefetch()
namespace DB
{

std::optional<HashTablesCacheStatistics> getHashTablesCacheStatistics()
{
    return getHashTablesStatistics().getCacheStats();
}

Aggregator::Params::StatsCollectingParams::StatsCollectingParams() = default;

Aggregator::Params::StatsCollectingParams::StatsCollectingParams(
    const ASTPtr & select_query_,
    bool collect_hash_table_stats_during_aggregation_,
    size_t max_entries_for_hash_table_stats_,
    size_t max_size_to_preallocate_for_aggregation_)
    : key(collect_hash_table_stats_during_aggregation_ ? HashTablesStatistics::calculateCacheKey(select_query_) : 0)
    , max_entries_for_hash_table_stats(max_entries_for_hash_table_stats_)
    , max_size_to_preallocate_for_aggregation(max_size_to_preallocate_for_aggregation_)
{
}

Block Aggregator::getHeader(bool final) const
{
    return params.getHeader(header, final);
@ -2783,8 +2635,7 @@ ManyAggregatedDataVariants Aggregator::prepareVariantsToMerge(ManyAggregatedData

    LOG_TRACE(log, "Merging aggregated data");

    if (params.stats_collecting_params.isCollectionAndUseEnabled())
        updateStatistics(data_variants, params.stats_collecting_params);
    updateStatistics(data_variants, params.stats_collecting_params);

    ManyAggregatedDataVariants non_empty_data;
    non_empty_data.reserve(data_variants.size());
@ -3486,4 +3337,23 @@ void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result) const
    throw Exception(ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT, "Unknown aggregated data variant.");
}

UInt64 calculateCacheKey(const DB::ASTPtr & select_query)
{
    if (!select_query)
        throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Query ptr cannot be null");

    const auto & select = select_query->as<DB::ASTSelectQuery &>();

    // It may happen in some corner cases like `select 1 as num group by num`.
    if (!select.tables())
        return 0;

    SipHash hash;
    hash.update(select.tables()->getTreeHash(/*ignore_aliases=*/true));
    if (const auto where = select.where())
        hash.update(where->getTreeHash(/*ignore_aliases=*/true));
    if (const auto group_by = select.groupBy())
        hash.update(group_by->getTreeHash(/*ignore_aliases=*/true));
    return hash.get64();
}
}
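A sketch of the overwrite policy the statistics cache above keeps: an entry holds the largest observed sizes and is replaced only when a new observation exceeds it, or is so much smaller (less than half) that the old prediction is clearly stale.

#include <cstddef>
#include <optional>

struct EntrySketch
{
    size_t sum_of_sizes;
    size_t median_size;
};

bool shouldOverwrite(const std::optional<EntrySketch> & hint, size_t sum_of_sizes, size_t median_size)
{
    if (!hint)
        return true; /// nothing cached yet
    return sum_of_sizes < hint->sum_of_sizes / 2 || hint->sum_of_sizes < sum_of_sizes
        || median_size < hint->median_size / 2 || hint->median_size < median_size;
}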
@ -39,9 +39,10 @@

#include <Parsers/IAST_fwd.h>

#include <Interpreters/AggregationMethod.h>
#include <Interpreters/AggregatedData.h>
#include <Interpreters/AggregatedDataVariants.h>
#include <Interpreters/AggregationMethod.h>
#include <Interpreters/HashTablesStatistics.h>

namespace DB
{
@ -128,24 +129,6 @@ public:

        const double min_hit_rate_to_use_consecutive_keys_optimization;

        struct StatsCollectingParams
        {
            StatsCollectingParams();

            StatsCollectingParams(
                const ASTPtr & select_query_,
                bool collect_hash_table_stats_during_aggregation_,
                size_t max_entries_for_hash_table_stats_,
                size_t max_size_to_preallocate_for_aggregation_);

            bool isCollectionAndUseEnabled() const { return key != 0; }
            void disable() { key = 0; }

            UInt64 key = 0;
            const size_t max_entries_for_hash_table_stats = 0;
            const size_t max_size_to_preallocate_for_aggregation = 0;
        };

        StatsCollectingParams stats_collecting_params;

        Params(
@ -674,6 +657,7 @@ private:
        Arena * arena);
};

UInt64 calculateCacheKey(const DB::ASTPtr & select_query);

/** Get the aggregation variant by its type. */
template <typename Method> Method & getDataVariant(AggregatedDataVariants & variants);
@ -685,13 +669,4 @@ APPLY_FOR_AGGREGATED_VARIANTS(M)

#undef M


struct HashTablesCacheStatistics
{
    size_t entries = 0;
    size_t hits = 0;
    size_t misses = 0;
};

std::optional<HashTablesCacheStatistics> getHashTablesCacheStatistics();
}
Some files were not shown because too many files have changed in this diff.