Merge branch 'master' into add_system_detach_tables
commit e9a3191dcf
.github/workflows/pull_request.yml (vendored)
@@ -172,7 +172,7 @@ jobs:
 ################################# Stage Final #################################
 #
 FinishCheck:
-  if: ${{ !cancelled() }}
+  if: ${{ !failure() }}
 needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
 runs-on: [self-hosted, style-checker-aarch64]
 steps:
@@ -1,5 +1,6 @@
 #pragma once

+#include <cstdlib>
 #include <memory>
 #include <string>

@@ -108,6 +108,14 @@ struct make_unsigned // NOLINT(readability-identifier-naming)
 using type = std::make_unsigned_t<T>;
 };

+template <> struct make_unsigned<Int8> { using type = UInt8; };
+template <> struct make_unsigned<UInt8> { using type = UInt8; };
+template <> struct make_unsigned<Int16> { using type = UInt16; };
+template <> struct make_unsigned<UInt16> { using type = UInt16; };
+template <> struct make_unsigned<Int32> { using type = UInt32; };
+template <> struct make_unsigned<UInt32> { using type = UInt32; };
+template <> struct make_unsigned<Int64> { using type = UInt64; };
+template <> struct make_unsigned<UInt64> { using type = UInt64; };
 template <> struct make_unsigned<Int128> { using type = UInt128; };
 template <> struct make_unsigned<UInt128> { using type = UInt128; };
 template <> struct make_unsigned<Int256> { using type = UInt256; };
@@ -121,6 +129,14 @@ struct make_signed // NOLINT(readability-identifier-naming)
 using type = std::make_signed_t<T>;
 };

+template <> struct make_signed<Int8> { using type = Int8; };
+template <> struct make_signed<UInt8> { using type = Int8; };
+template <> struct make_signed<Int16> { using type = Int16; };
+template <> struct make_signed<UInt16> { using type = Int16; };
+template <> struct make_signed<Int32> { using type = Int32; };
+template <> struct make_signed<UInt32> { using type = Int32; };
+template <> struct make_signed<Int64> { using type = Int64; };
+template <> struct make_signed<UInt64> { using type = Int64; };
 template <> struct make_signed<Int128> { using type = Int128; };
 template <> struct make_signed<UInt128> { using type = Int128; };
 template <> struct make_signed<Int256> { using type = Int256; };
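A minimal, self-contained sketch of the specialization pattern used in the two hunks above. `WideInt128` and `WideUInt128` are placeholder types standing in for ClickHouse's wide-integer aliases, which are defined elsewhere and not shown in this diff:

```cpp
#include <type_traits>

struct WideInt128 {};   // placeholder for a signed 128-bit integer type
struct WideUInt128 {};  // placeholder for its unsigned counterpart

// Primary template falls back to the standard trait, like the code above.
template <typename T>
struct make_unsigned { using type = std::make_unsigned_t<T>; };

// Explicit specialization for a type std::make_unsigned_t cannot handle.
template <> struct make_unsigned<WideInt128> { using type = WideUInt128; };

static_assert(std::is_same_v<make_unsigned<int>::type, unsigned int>);
static_assert(std::is_same_v<make_unsigned<WideInt128>::type, WideUInt128>);

int main() {}
```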
base/base/isSharedPtrUnique.h (new file)
@@ -0,0 +1,9 @@
+#pragma once
+
+#include <memory>
+
+template <typename T>
+bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
+{
+    return ptr.use_count() == 1;
+}
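A small usage sketch for the new helper; the function body is repeated from the header above so the example compiles on its own. Note that `use_count() == 1` is only a meaningful uniqueness check while no other thread can concurrently create or destroy copies of the pointer:

```cpp
#include <memory>

// Same definition as in base/base/isSharedPtrUnique.h above.
template <typename T>
bool isSharedPtrUnique(const std::shared_ptr<T> & ptr)
{
    return ptr.use_count() == 1;
}

int main()
{
    auto holder = std::make_shared<int>(42);
    bool unique_before = isSharedPtrUnique(holder);  // true: one owner
    auto second_owner = holder;
    bool unique_after = isSharedPtrUnique(holder);   // false: two owners
    return (unique_before && !unique_after) ? 0 : 1;
}
```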
@@ -232,7 +232,7 @@ void Foundation_API format(
 const Any & value10);


-void Foundation_API format(std::string & result, const std::string & fmt, const std::vector<Any> & values);
+void Foundation_API formatVector(std::string & result, const std::string & fmt, const std::vector<Any> & values);
 /// Supports a variable number of arguments and is used by
 /// all other variants of format().

@@ -21,6 +21,8 @@
 #include "Poco/AtomicCounter.h"
 #include "Poco/Foundation.h"

+#include <atomic>
+

 namespace Poco
 {
@@ -303,7 +303,7 @@ void format(std::string& result, const std::string& fmt, const Any& value)
 {
 std::vector<Any> args;
 args.push_back(value);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -312,7 +312,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 std::vector<Any> args;
 args.push_back(value1);
 args.push_back(value2);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -322,7 +322,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value1);
 args.push_back(value2);
 args.push_back(value3);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -333,7 +333,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value2);
 args.push_back(value3);
 args.push_back(value4);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -345,7 +345,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value3);
 args.push_back(value4);
 args.push_back(value5);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -358,7 +358,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value4);
 args.push_back(value5);
 args.push_back(value6);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -372,7 +372,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value5);
 args.push_back(value6);
 args.push_back(value7);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -387,7 +387,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value6);
 args.push_back(value7);
 args.push_back(value8);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -403,7 +403,7 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value7);
 args.push_back(value8);
 args.push_back(value9);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


@@ -420,11 +420,11 @@ void format(std::string& result, const std::string& fmt, const Any& value1, cons
 args.push_back(value8);
 args.push_back(value9);
 args.push_back(value10);
-format(result, fmt, args);
+formatVector(result, fmt, args);
 }


-void format(std::string& result, const std::string& fmt, const std::vector<Any>& values)
+void formatVector(std::string& result, const std::string& fmt, const std::vector<Any>& values)
 {
 std::string::const_iterator itFmt = fmt.begin();
 std::string::const_iterator endFmt = fmt.end();
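A self-contained toy analogue (using `std::any`, not Poco's `Any`) of the delegation pattern visible in these hunks: the vector-taking overload carries its own name, and each fixed-arity `format()` packs its arguments into a vector and forwards to it:

```cpp
#include <any>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the renamed vector overload; the formatting itself is a placeholder.
void formatVector(std::string & result, const std::string & fmt, const std::vector<std::any> & values)
{
    result = fmt + " [" + std::to_string(values.size()) + " argument(s) bound]";
}

// Fixed-arity overload delegating to formatVector, mirroring the edits above.
void format(std::string & result, const std::string & fmt, const std::any & value)
{
    std::vector<std::any> args;
    args.push_back(value);
    formatVector(result, fmt, args);
}

int main()
{
    std::string out;
    format(out, "%d rows", std::any{42});
    std::cout << out << '\n';
}
```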
@@ -57,7 +57,7 @@ std::string ObjectId::toString(const std::string& fmt) const

 for (int i = 0; i < 12; ++i)
 {
-s += format(fmt, (unsigned int) _id[i]);
+s += Poco::format(fmt, (unsigned int) _id[i]);
 }
 return s;
 }
@@ -43,9 +43,9 @@ namespace Poco {
 namespace MongoDB {


-static const std::string keyCursor {"cursor"};
-static const std::string keyFirstBatch {"firstBatch"};
-static const std::string keyNextBatch {"nextBatch"};
+[[ maybe_unused ]] static const std::string keyCursor {"cursor"};
+[[ maybe_unused ]] static const std::string keyFirstBatch {"firstBatch"};
+[[ maybe_unused ]] static const std::string keyNextBatch {"nextBatch"};

 static Poco::Int64 cursorIdFromResponse(const MongoDB::Document& doc);

@@ -17,9 +17,9 @@
 #include "Poco/NumberFormatter.h"
 #include "Poco/NumberParser.h"
 #include "Poco/String.h"
+#include <charconv>
 #include <format>


 using Poco::NumberFormatter;
 using Poco::NumberParser;
 using Poco::icompare;
contrib/azure (vendored)
@@ -1 +1 @@
-Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
+Subproject commit ea3e19a7be08519134c643177d56c7484dfec884
@@ -179,12 +179,19 @@ endif ()

 target_compile_definitions(_jemalloc PRIVATE -DJEMALLOC_PROF=1)

-# jemalloc provides support for two different libunwind flavors: the original HP libunwind and the one coming with gcc / g++ / libstdc++.
-# The latter is identified by `JEMALLOC_PROF_LIBGCC` and uses `_Unwind_Backtrace` method instead of `unw_backtrace`.
-# At the time ClickHouse uses LLVM libunwind which follows libgcc's way of backtracking.
+# jemalloc provides support for two unwind flavors:
+# - JEMALLOC_PROF_LIBUNWIND - unw_backtrace() - gnu libunwind (compatible with llvm libunwind)
+# - JEMALLOC_PROF_LIBGCC - _Unwind_Backtrace() - the original HP libunwind and the one coming with gcc / g++ / libstdc++.
 #
-# ClickHouse has to provide `unw_backtrace` method by the means of [commit 8e2b31e](https://github.com/ClickHouse/libunwind/commit/8e2b31e766dd502f6df74909e04a7dbdf5182eb1).
-target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBGCC=1)
+# But for JEMALLOC_PROF_LIBGCC it also calls _Unwind_Backtrace() during
+# bootstrapping of jemalloc, which may lead to deadlock, if the dlsym will do
+# allocations somewhere (like glibc does prior to 2.34, see [1]).
+#
+# [1]: https://sourceware.org/git/?p=glibc.git;a=commit;h=fada9018199c21c469ff0e731ef75c6020074ac9
+#
+# And since ClickHouse unwind already supports unw_backtrace() we can safely
+# switch to it to avoid this deadlock.
+target_compile_definitions (_jemalloc PRIVATE -DJEMALLOC_PROF_LIBUNWIND=1)
 target_link_libraries (_jemalloc PRIVATE unwind)

 # for RTLD_NEXT
contrib/pocketfft (vendored)
@@ -1 +1 @@
-Subproject commit 9efd4da52cf8d28d14531d14e43ad9d913807546
+Subproject commit f4c1aa8aa9ce79ad39e80f2c9c41b92ead90fda3

contrib/rocksdb (vendored)
@@ -1 +1 @@
-Subproject commit 078fa5638690004e1f744076d1bdcc4e93767304
+Subproject commit be366233921293bd07a84dc4ea6991858665f202
@@ -5,20 +5,13 @@ if (NOT ENABLE_ROCKSDB)
 return()
 endif()

-## this file is extracted from `contrib/rocksdb/CMakeLists.txt`
-set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
-list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")
-set(PORTABLE ON)
-## always disable jemalloc for rocksdb by default
-## because it introduces non-standard jemalloc APIs
+# Always disable jemalloc for rocksdb by default because it introduces non-standard jemalloc APIs
 option(WITH_JEMALLOC "build with JeMalloc" OFF)
-set(USE_SNAPPY OFF)
-if (TARGET ch_contrib::snappy)
-set(USE_SNAPPY ON)
-endif()
-option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
-## lz4, zlib, zstd is enabled in ClickHouse by default
+option(WITH_LIBURING "build with liburing" OFF) # TODO could try to enable this conditionally, depending on ClickHouse's ENABLE_LIBURING
+# ClickHouse cannot be compiled without snappy, lz4, zlib, zstd
+option(WITH_SNAPPY "build with SNAPPY" ON)
 option(WITH_LZ4 "build with lz4" ON)
 option(WITH_ZLIB "build with zlib" ON)
 option(WITH_ZSTD "build with zstd" ON)
@@ -26,78 +19,46 @@ option(WITH_ZSTD "build with zstd" ON)
 # third-party/folly is only validated to work on Linux and Windows for now.
 # So only turn it on there by default.
 if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
-if(MSVC AND MSVC_VERSION LESS 1910)
-# Folly does not compile with MSVC older than VS2017
-option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
-else()
 option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
-endif()
 else()
 option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
 endif()

-if( NOT DEFINED CMAKE_CXX_STANDARD )
-set(CMAKE_CXX_STANDARD 11)
-endif()
-
-if(MSVC)
-option(WITH_XPRESS "build with windows built in compression" OFF)
-include("${ROCKSDB_SOURCE_DIR}/thirdparty.inc")
-else()
-if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
-# FreeBSD has jemalloc as default malloc
-# but it does not have all the jemalloc files in include/...
-set(WITH_JEMALLOC ON)
-else()
-if(WITH_JEMALLOC AND TARGET ch_contrib::jemalloc)
-add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
-list(APPEND THIRDPARTY_LIBS ch_contrib::jemalloc)
-endif()
-endif()
-
 if(WITH_SNAPPY)
 add_definitions(-DSNAPPY)
 list(APPEND THIRDPARTY_LIBS ch_contrib::snappy)
-endif()
-
-if(WITH_ZLIB)
-add_definitions(-DZLIB)
-list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
-endif()
-
-if(WITH_LZ4)
-add_definitions(-DLZ4)
-list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
-endif()
-
-if(WITH_ZSTD)
-add_definitions(-DZSTD)
-list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
-endif()
 endif()

-if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
-if(POWER9)
-set(HAS_POWER9 1)
-set(HAS_ALTIVEC 1)
-else()
-set(HAS_POWER8 1)
-set(HAS_ALTIVEC 1)
-endif(POWER9)
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
+if(WITH_ZLIB)
+add_definitions(-DZLIB)
+list(APPEND THIRDPARTY_LIBS ch_contrib::zlib)
+endif()

-if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
-set(HAS_ARMV8_CRC 1)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
+if(WITH_LZ4)
+add_definitions(-DLZ4)
+list(APPEND THIRDPARTY_LIBS ch_contrib::lz4)
+endif()

+if(WITH_ZSTD)
+add_definitions(-DZSTD)
+list(APPEND THIRDPARTY_LIBS ch_contrib::zstd)
+endif()

-if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
+option(PORTABLE "build a portable binary" ON)

+if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
 add_definitions(-DHAVE_SSE42)
 add_definitions(-DHAVE_PCLMUL)
 endif()

+if(CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64|AARCH64")
+set (HAS_ARMV8_CRC 1)
+# the original build descriptions set specific flags for ARM. These flags are already subsumed by ClickHouse's general
+# ARM flags, see cmake/cpu_features.cmake
+# set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
+# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
+endif()

 set (HAVE_THREAD_LOCAL 1)
 if(HAVE_THREAD_LOCAL)
 add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
@@ -107,8 +68,6 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
 add_definitions(-DOS_MACOSX)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
 add_definitions(-DOS_LINUX)
-elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
-add_definitions(-DOS_SOLARIS)
 elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
 add_definitions(-DOS_FREEBSD)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
@@ -123,12 +82,10 @@ endif()

 if (OS_LINUX)
 add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
-add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
 add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
-elseif (OS_FREEBSD)
-add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
 endif()

+set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")

 include_directories(${ROCKSDB_SOURCE_DIR})
 include_directories("${ROCKSDB_SOURCE_DIR}/include")
@@ -136,11 +93,11 @@ if(WITH_FOLLY_DISTRIBUTED_MUTEX)
 include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
 endif()

-# Main library source code

 set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/cache_entry_roles.cc
+${ROCKSDB_SOURCE_DIR}/cache/cache_key.cc
+${ROCKSDB_SOURCE_DIR}/cache/cache_reservation_manager.cc
 ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
 ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
@@ -156,6 +113,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_sequential_reader.cc
 ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
+${ROCKSDB_SOURCE_DIR}/db/blob/prefetch_buffer_collection.cc
 ${ROCKSDB_SOURCE_DIR}/db/builder.cc
 ${ROCKSDB_SOURCE_DIR}/db/c.cc
 ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
@@ -229,6 +187,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
 ${ROCKSDB_SOURCE_DIR}/env/fs_remap.cc
 ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
+${ROCKSDB_SOURCE_DIR}/env/unique_id_gen.cc
 ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
 ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
 ${ROCKSDB_SOURCE_DIR}/file/file_util.cc
@@ -247,6 +206,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
 ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
 ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
+${ROCKSDB_SOURCE_DIR}/memory/memory_allocator.cc
 ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
 ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
 ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
@@ -322,6 +282,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/table/table_factory.cc
 ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
 ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
+${ROCKSDB_SOURCE_DIR}/table/unique_id.cc
 ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
 ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
 ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
@@ -333,9 +294,12 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
 ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
 ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
-${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
 ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
 ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
+${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_handler.cc
+${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record_result.cc
+${ROCKSDB_SOURCE_DIR}/trace_replay/trace_record.cc
+${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
 ${ROCKSDB_SOURCE_DIR}/util/coding.cc
 ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
 ${ROCKSDB_SOURCE_DIR}/util/comparator.cc
@@ -347,6 +311,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
 ${ROCKSDB_SOURCE_DIR}/util/random.cc
 ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
+${ROCKSDB_SOURCE_DIR}/util/regex.cc
 ${ROCKSDB_SOURCE_DIR}/util/ribbon_config.cc
 ${ROCKSDB_SOURCE_DIR}/util/slice.cc
 ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
@@ -362,18 +327,23 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
+${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load.cc
+${ROCKSDB_SOURCE_DIR}/utilities/cache_dump_load_impl.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
+${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
+${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_secondary_cache.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
+${ROCKSDB_SOURCE_DIR}/utilities/merge_operators.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
@@ -393,6 +363,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
+${ROCKSDB_SOURCE_DIR}/utilities/trace/replayer_impl.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
@@ -411,6 +382,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
+${ROCKSDB_SOURCE_DIR}/utilities/wal_filter.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/locktree/concurrent_tree.cc
@@ -425,7 +397,7 @@ set(SOURCES
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/dbt.cc
 ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
-rocksdb_build_version.cc)
+build_version.cc) # generated by hand

 if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
 set_source_files_properties(
@@ -462,5 +434,6 @@ endif()
 add_library(_rocksdb ${SOURCES})
 add_library(ch_contrib::rocksdb ALIAS _rocksdb)
 target_link_libraries(_rocksdb PRIVATE ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
+
 # SYSTEM is required to overcome some issues
 target_include_directories(_rocksdb SYSTEM BEFORE INTERFACE "${ROCKSDB_SOURCE_DIR}/include")
@@ -16,6 +16,9 @@ dpkg -i package_folder/clickhouse-client_*.deb

 ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test

+# shellcheck disable=SC1091
+source /utils.lib
+
 # install test configs
 /usr/share/clickhouse-test/config/install.sh

@@ -272,3 +275,5 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
 mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
 mv /var/log/clickhouse-server/stderr2.log /test_output/ ||:
 fi
+
+collect_core_dumps
@@ -12,8 +12,7 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))
 USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
 USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}

-# disable for now
-RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
+RUN_SEQUENTIAL_TESTS_IN_PARALLEL=1

 if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
 RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
@@ -310,7 +309,7 @@ function run_tests()
 try_run_with_retry 10 clickhouse-client -q "insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')"

 set +e
-timeout -s TERM --preserve-status 120m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
+timeout -k 60m -s TERM --preserve-status 140m clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
 --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
 | ts '%Y-%m-%d %H:%M:%S' \
 | tee -a test_output/test_result.txt
@@ -321,7 +320,7 @@ export -f run_tests


 # This should be enough to setup job and collect artifacts
-TIMEOUT=$((MAX_RUN_TIME - 300))
+TIMEOUT=$((MAX_RUN_TIME - 600))
 if [ "$NUM_TRIES" -gt "1" ]; then
 # We don't run tests with Ordinary database in PRs, only in master.
 # So run new/changed tests with Ordinary at least once in flaky check.
@@ -483,3 +482,5 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
 mv /var/log/clickhouse-server/stderr1.log /test_output/ ||:
 tar -chf /test_output/coordination1.tar /var/lib/clickhouse1/coordination ||:
 fi
+
+collect_core_dumps
@@ -1,8 +1,5 @@
 #!/bin/bash

-# core.COMM.PID-TID
-sysctl kernel.core_pattern='core.%e.%p-%P'
-
 OK="\tOK\t\\N\t"
 FAIL="\tFAIL\t\\N\t"

@@ -315,12 +312,4 @@ function collect_query_and_trace_logs()
 done
 }

-function collect_core_dumps()
-{
-find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
-zstd --threads=0 "$core"
-mv "$core.zst" /test_output/
-done
-}
-
 # vi: ft=bash
@@ -1,5 +1,10 @@
 #!/bin/bash

+# core.COMM.PID-TID
+sysctl kernel.core_pattern='core.%e.%p-%P'
+# ASAN doesn't work with suid_dumpable=2
+sysctl fs.suid_dumpable=1
+
 function run_with_retry()
 {
 if [[ $- =~ e ]]; then
@@ -48,4 +53,12 @@ function timeout_with_logging() {
 return $exit_code
 }

+function collect_core_dumps()
+{
+find . -type f -maxdepth 1 -name 'core.*' | while read -r core; do
+zstd --threads=0 "$core"
+mv "$core.zst" /test_output/
+done
+}
+
 # vi: ft=bash
@@ -21,6 +21,9 @@ source /attach_gdb.lib
 # shellcheck source=../stateless/stress_tests.lib
 source /stress_tests.lib

+# shellcheck disable=SC1091
+source /utils.lib
+
 install_packages package_folder

 # Thread Fuzzer allows to check more permutations of possible thread scheduling
@@ -75,7 +75,7 @@ SETTINGS
 Possible values:

 - unordered — With unordered mode, the set of all already processed files is tracked with persistent nodes in ZooKeeper.
-- ordered — With ordered mode, only the max name of the successfully consumed file, and the names of files that will be retried after unsuccessful loading attempt are being stored in ZooKeeper.
+- ordered — With ordered mode, the files are processed in lexicographic order. It means that if a file named 'BBB' was processed at some point and later on a file named 'AA' is added to the bucket, it will be ignored. Only the max name (in lexicographic sense) of the successfully consumed file, and the names of files that will be retried after an unsuccessful loading attempt are stored in ZooKeeper.

 Default value: `ordered` in versions before 24.6. Starting with 24.6 there is no default value, the setting becomes required to be specified manually. For tables created on earlier versions the default value will remain `Ordered` for compatibility.

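A toy model (not the actual S3Queue code) of the ordered-mode bookkeeping described in the updated line above, assuming only the maximum processed file name is persisted:

```cpp
#include <iostream>
#include <string>

// Ordered mode remembers only the lexicographically largest processed name,
// so any file that sorts at or below it is skipped.
struct OrderedModeTracker
{
    std::string max_processed_name;  // what would be stored in ZooKeeper

    bool shouldProcess(const std::string & file_name) const
    {
        return file_name > max_processed_name;
    }

    void markProcessed(const std::string & file_name)
    {
        if (file_name > max_processed_name)
            max_processed_name = file_name;
    }
};

int main()
{
    OrderedModeTracker tracker;
    tracker.markProcessed("BBB");
    std::cout << tracker.shouldProcess("AA") << '\n';   // 0: 'AA' sorts before 'BBB', ignored
    std::cout << tracker.shouldProcess("CCC") << '\n';  // 1: 'CCC' sorts after 'BBB', processed
}
```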
@@ -1535,6 +1535,10 @@ the columns from input data will be mapped to the columns from the table by thei
 Otherwise, the first row will be skipped.
 If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
 the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
+If setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
+the types in header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes output format.
+If setting [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) is set to 1,
+the types in header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes input format.
 :::

 ## RowBinaryWithDefaults {#rowbinarywithdefaults}
@@ -1951,6 +1951,18 @@ The maximum allowed size for String in RowBinary format. It prevents allocating

 Default value: `1GiB`.

+### output_format_binary_encode_types_in_binary_format {#output_format_binary_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes output format.
+
+Disabled by default.
+
+### input_format_binary_decode_types_in_binary_format {#input_format_binary_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in RowBinaryWithNamesAndTypes input format.
+
+Disabled by default.
+
 ## Native format settings {#native-format-settings}

 ### input_format_native_allow_types_conversion {#input_format_native_allow_types_conversion}
@@ -1958,3 +1970,15 @@ Default value: `1GiB`.
 Allow types conversion in Native input format between columns from input data and requested columns.

 Enabled by default.
+
+### output_format_native_encode_types_in_binary_format {#output_format_native_encode_types_in_binary_format}
+
+Write data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native output format.
+
+Disabled by default.
+
+### input_format_native_decode_types_in_binary_format {#input_format_native_decode_types_in_binary_format}
+
+Read data types in [binary format](../../sql-reference/data-types/data-types-binary-encoding.md) instead of type names in Native input format.
+
+Disabled by default.
@@ -1358,12 +1358,25 @@ Connection pool size for PostgreSQL table engine and database engine.

 Default value: 16

+## postgresql_connection_attempt_timeout {#postgresql-connection-attempt-timeout}
+
+Connection timeout in seconds of a single attempt to connect to the PostgreSQL end-point.
+The value is passed as a `connect_timeout` parameter of the connection URL.
+
+Default value: `2`.
+
 ## postgresql_connection_pool_wait_timeout {#postgresql-connection-pool-wait-timeout}

 Connection pool push/pop timeout on empty pool for PostgreSQL table engine and database engine. By default it will block on empty pool.

 Default value: 5000

+## postgresql_connection_pool_retries {#postgresql-connection-pool-retries}
+
+The maximum number of retries to establish a connection with the PostgreSQL end-point.
+
+Default value: `2`.
+
 ## postgresql_connection_pool_auto_close_connection {#postgresql-connection-pool-auto-close-connection}

 Close connection before returning connection to the pool.
@@ -16,7 +16,7 @@ singleValueOrNull(x)

 **Parameters**

-- `x` — Column of any [data type](../../data-types/index.md).
+- `x` — Column of any [data type](../../data-types/index.md) (except [Map](../../data-types/map.md), [Array](../../data-types/array.md) or [Tuple](../../data-types/tuple) which cannot be of type [Nullable](../../data-types/nullable.md)).

 **Returned values**

docs/en/sql-reference/data-types/data-types-binary-encoding.md (new file)
@@ -0,0 +1,115 @@
+---
+slug: /en/sql-reference/data-types/data-types-binary-encoding
+sidebar_position: 56
+sidebar_label: Data types binary encoding specification.
+---
+
+# Data types binary encoding specification
+
+This specification describes the binary format that can be used for binary encoding and decoding of ClickHouse data types. This format is used in `Dynamic` column [binary serialization](dynamic.md#binary-output-format) and can be used in input/output formats [RowBinaryWithNamesAndTypes](../../interfaces/formats.md#rowbinarywithnamesandtypes) and [Native](../../interfaces/formats.md#native) under corresponding settings.
+
+The table below describes how each data type is represented in binary format. Each data type encoding consists of 1 byte that indicates the type and some optional additional information.
+`var_uint` in the binary encoding means that the size is encoded using Variable-Length Quantity compression.
+
+| ClickHouse data type | Binary encoding |
+|----------------------|-----------------|
+| `Nothing` | `0x00` |
+| `UInt8` | `0x01` |
+| `UInt16` | `0x02` |
+| `UInt32` | `0x03` |
+| `UInt64` | `0x04` |
+| `UInt128` | `0x05` |
+| `UInt256` | `0x06` |
+| `Int8` | `0x07` |
+| `Int16` | `0x08` |
+| `Int32` | `0x09` |
+| `Int64` | `0x0A` |
+| `Int128` | `0x0B` |
+| `Int256` | `0x0C` |
+| `Float32` | `0x0D` |
+| `Float64` | `0x0E` |
+| `Date` | `0x0F` |
+| `Date32` | `0x10` |
+| `DateTime` | `0x11` |
+| `DateTime(time_zone)` | `0x12<var_uint_time_zone_name_size><time_zone_name_data>` |
+| `DateTime64(P)` | `0x13<uint8_precision>` |
+| `DateTime64(P, time_zone)` | `0x14<uint8_precision><var_uint_time_zone_name_size><time_zone_name_data>` |
+| `String` | `0x15` |
+| `FixedString(N)` | `0x16<var_uint_size>` |
+| `Enum8` | `0x17<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int8_value_1>...<var_uint_name_size_N><name_data_N><int8_value_N>` |
+| `Enum16` | `0x18<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><int16_little_endian_value_1>...><var_uint_name_size_N><name_data_N><int16_little_endian_value_N>` |
+| `Decimal32(P, S)` | `0x19<uint8_precision><uint8_scale>` |
+| `Decimal64(P, S)` | `0x1A<uint8_precision><uint8_scale>` |
+| `Decimal128(P, S)` | `0x1B<uint8_precision><uint8_scale>` |
+| `Decimal256(P, S)` | `0x1C<uint8_precision><uint8_scale>` |
+| `UUID` | `0x1D` |
+| `Array(T)` | `0x1E<nested_type_encoding>` |
+| `Tuple(T1, ..., TN)` | `0x1F<var_uint_number_of_elements><nested_type_encoding_1>...<nested_type_encoding_N>` |
+| `Tuple(name1 T1, ..., nameN TN)` | `0x20<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
+| `Set` | `0x21` |
+| `Interval` | `0x22<interval_kind>` (see [interval kind binary encoding](#interval-kind-binary-encoding)) |
+| `Nullable(T)` | `0x23<nested_type_encoding>` |
+| `Function` | `0x24<var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N><return_type_encoding>` |
+| `AggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x25<var_uint_version><var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
+| `LowCardinality(T)` | `0x26<nested_type_encoding>` |
+| `Map(K, V)` | `0x27<key_type_encoding><value_type_encoding>` |
+| `IPv4` | `0x28` |
+| `IPv6` | `0x29` |
+| `Variant(T1, ..., TN)` | `0x2A<var_uint_number_of_variants><variant_type_encoding_1>...<variant_type_encoding_N>` |
+| `Dynamic(max_types=N)` | `0x2B<uint8_max_types>` |
+| `Custom type` (`Ring`, `Polygon`, etc) | `0x2C<var_uint_type_name_size><type_name_data>` |
+| `Bool` | `0x2D` |
+| `SimpleAggregateFunction(function_name(param_1, ..., param_N), arg_T1, ..., arg_TN)` | `0x2E<var_uint_function_name_size><function_name_data><var_uint_number_of_parameters><param_1>...<param_N><var_uint_number_of_arguments><argument_type_encoding_1>...<argument_type_encoding_N>` (see [aggregate function parameter binary encoding](#aggregate-function-parameter-binary-encoding)) |
+| `Nested(name1 T1, ..., nameN TN)` | `0x2F<var_uint_number_of_elements><var_uint_name_size_1><name_data_1><nested_type_encoding_1>...<var_uint_name_size_N><name_data_N><nested_type_encoding_N>` |
+
+### Interval kind binary encoding
+
+The table below describes how different interval kinds of `Interval` data type are encoded.
+
+| Interval kind | Binary encoding |
+|---------------|-----------------|
+| `Nanosecond` | `0x00` |
+| `Microsecond` | `0x01` |
+| `Millisecond` | `0x02` |
+| `Second` | `0x03` |
+| `Minute` | `0x04` |
+| `Hour` | `0x05` |
+| `Day` | `0x06` |
+| `Week` | `0x07` |
+| `Month` | `0x08` |
+| `Quarter` | `0x09` |
+| `Year` | `0x1A` |
+
+### Aggregate function parameter binary encoding
+
+The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
+The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.
+
+| Parameter type | Binary encoding |
+|----------------|-----------------|
+| `Null` | `0x00` |
+| `UInt64` | `0x01<var_uint_value>` |
+| `Int64` | `0x02<var_int_value>` |
+| `UInt128` | `0x03<uint128_little_endian_value>` |
+| `Int128` | `0x04<int128_little_endian_value>` |
+| `UInt128` | `0x05<uint128_little_endian_value>` |
+| `Int128` | `0x06<int128_little_endian_value>` |
+| `Float64` | `0x07<float64_little_endian_value>` |
+| `Decimal32` | `0x08<var_uint_scale><int32_little_endian_value>` |
+| `Decimal64` | `0x09<var_uint_scale><int64_little_endian_value>` |
+| `Decimal128` | `0x0A<var_uint_scale><int128_little_endian_value>` |
+| `Decimal256` | `0x0B<var_uint_scale><int256_little_endian_value>` |
+| `String` | `0x0C<var_uint_size><data>` |
+| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
+| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
+| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_endoding_N><value_encoding_N>` |
+| `IPv4` | `0x10<uint32_little_endian_value>` |
+| `IPv6` | `0x11<uint128_little_endian_value>` |
+| `UUID` | `0x12<uuid_value>` |
+| `Bool` | `0x13<bool_value>` |
+| `Object` | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>` |
+| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>` |
+| `Negative infinity` | `0xFE` |
+| `Positive infinity` | `0xFF` |
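A hedged sketch of how the `var_uint` fields in the tables above could be produced, assuming the usual LEB128-style variable-length encoding (7 data bits per byte, high bit set while more bytes follow); the `FixedString` example only combines the tag and the size from the first table:

```cpp
#include <cstdint>
#include <vector>

// Variable-Length Quantity encoding of an unsigned integer (LEB128-style, assumed).
std::vector<uint8_t> encodeVarUInt(uint64_t value)
{
    std::vector<uint8_t> out;
    do
    {
        uint8_t byte = value & 0x7F;
        value >>= 7;
        if (value)
            byte |= 0x80;  // more bytes follow
        out.push_back(byte);
    } while (value);
    return out;
}

// Example: binary encoding of the type FixedString(N) per the table above:
// the tag byte 0x16 followed by var_uint N.
std::vector<uint8_t> encodeFixedStringType(uint64_t n)
{
    std::vector<uint8_t> encoding{0x16};
    auto size = encodeVarUInt(n);
    encoding.insert(encoding.end(), size.begin(), size.end());
    return encoding;
}

int main()
{
    auto enc = encodeFixedStringType(5);  // expected: {0x16, 0x05}
    return enc.size() == 2 ? 0 : 1;
}
```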
@@ -493,3 +493,14 @@ SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) O
 ```

 As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`.
+
+### Binary output format
+
+In [RowBinary](../../interfaces/formats.md#rowbinary-rowbinary) format values of `Dynamic` type are serialized in the following format:
+
+```text
+<binary_encoded_data_type><value_in_binary_format_according_to_the_data_type>
+```
+
+See the [data types binary encoding specification](../../sql-reference/data-types/data-types-binary-encoding.md)
@@ -5,11 +5,11 @@ sidebar_label: Object Data Type
 keywords: [object, data type]
 ---

-# Object Data Type
+# Object Data Type (deprecated)

-:::note
-This feature is not production-ready and is now deprecated. If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864)
-:::
+**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-ingestion/data-formats/json) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
+<hr />

 Stores JavaScript Object Notation (JSON) documents in a single column.

@@ -58,6 +58,8 @@ KILL QUERY WHERE query_id='2-857d-4a57-9ee0-327da5d60a90'
 KILL QUERY WHERE user='username' SYNC
 ```
 
+:::tip If you are killing a query in ClickHouse Cloud or in a self-managed cluster, then be sure to use the ```ON CLUSTER [cluster-name]``` option, in order to ensure the query is killed on all replicas:::
+
 Read-only users can only stop their own queries.
 
 By default, the asynchronous version of queries is used (`ASYNC`), which does not wait for confirmation that queries have stopped.
@@ -131,6 +133,7 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
 -- Cancel the specific mutation:
 KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
 ```
 
+:::tip If you are killing a mutation in ClickHouse Cloud or in a self-managed cluster, then be sure to use the ```ON CLUSTER [cluster-name]``` option, in order to ensure the mutation is killed on all replicas:::
 
 The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
 
@@ -12,7 +12,7 @@ sidebar_label: "Distinctive Features of ClickHouse"
 
 This point had to be singled out because there are systems that can store the values of individual columns separately, but cannot effectively process analytical queries due to being optimized for a different workload. Examples: HBase, BigTable, Cassandra, HyperTable. In these systems you get a throughput of around hundreds of thousands of rows per second, but not hundreds of millions of rows per second.
 
 It is also worth noting that ClickHouse is a database management system, not a system for a single database. That is, ClickHouse lets you create tables and databases at runtime, load data, and run queries without reconfiguring and restarting the server.
 
 ## Data Compression {#szhatie-dannykh}
 
@@ -35,10 +35,9 @@ disable = '''
 broad-except,
 bare-except,
 no-else-return,
-global-statement
+global-statement,
 '''
 
 [tool.pylint.SIMILARITIES]
 # due to SQL
 min-similarity-lines=1000
 
@@ -3,6 +3,8 @@
 #include <Backups/BackupStatus.h>
 #include <Common/ProfileEvents.h>
 
+#include <exception>
+
 namespace DB
 {
 
@@ -8,6 +8,7 @@ namespace ProfileEvents
 extern const Event DistributedConnectionUsable;
 extern const Event DistributedConnectionMissingTable;
 extern const Event DistributedConnectionStaleReplica;
+extern const Event DistributedConnectionFailTry;
 }
 
 namespace DB
@@ -97,6 +98,8 @@ void ConnectionEstablisher::run(ConnectionEstablisher::TryResult & result, std::
 }
 catch (const Exception & e)
 {
+ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
+
 if (e.code() != ErrorCodes::NETWORK_ERROR && e.code() != ErrorCodes::SOCKET_TIMEOUT
 && e.code() != ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF && e.code() != ErrorCodes::DNS_ERROR)
 throw;
@@ -7,7 +7,6 @@
 namespace ProfileEvents
 {
 extern const Event HedgedRequestsChangeReplica;
-extern const Event DistributedConnectionFailTry;
 extern const Event DistributedConnectionFailAtAll;
 }
 
@@ -327,7 +326,6 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::processFinishedConnect
 {
 ShuffledPool & shuffled_pool = shuffled_pools[index];
 LOG_INFO(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
-ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
 
 shuffled_pool.error_count = std::min(pool->getMaxErrorCup(), shuffled_pool.error_count + 1);
 shuffled_pool.slowdown_count = 0;
@@ -267,7 +267,11 @@ bool ColumnAggregateFunction::structureEquals(const IColumn & to) const
 }
 
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnAggregateFunction::insertRangeFrom(const IColumn & from, size_t start, size_t length)
+#else
+void ColumnAggregateFunction::doInsertRangeFrom(const IColumn & from, size_t start, size_t length)
+#endif
 {
 const ColumnAggregateFunction & from_concrete = assert_cast<const ColumnAggregateFunction &>(from);
 
@@ -462,7 +466,11 @@ void ColumnAggregateFunction::insertFromWithOwnership(const IColumn & from, size
 insertMergeFrom(from, n);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnAggregateFunction::insertFrom(const IColumn & from, size_t n)
+#else
+void ColumnAggregateFunction::doInsertFrom(const IColumn & from, size_t n)
+#endif
 {
 insertRangeFrom(from, n, 1);
 }
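The hunks above and in the Column* files that follow all apply the same mechanical change: each affected virtual method keeps its public name in ordinary builds and is renamed to a `do*` variant when `ABORT_ON_LOGICAL_ERROR` is defined. A minimal sketch of what such a split can look like is shown below; it is illustrative only (`IColumnSketch` and the `typeid` check are assumptions, not the actual `IColumn` interface), the likely intent being that the base class can run extra sanity checks in debug-like builds before dispatching to the virtual implementation.

```cpp
#include <cassert>
#include <cstddef>
#include <typeinfo>

// Illustrative pattern only; not the real IColumn interface.
struct IColumnSketch
{
    virtual ~IColumnSketch() = default;

#if !defined(ABORT_ON_LOGICAL_ERROR)
    // Ordinary builds: the public method is the virtual entry point.
    virtual void insertFrom(const IColumnSketch & src, std::size_t n) = 0;
#else
    // Checked builds: the public method stays non-virtual, validates its
    // arguments (example check only), and forwards to the renamed override.
    void insertFrom(const IColumnSketch & src, std::size_t n)
    {
        assert(typeid(*this) == typeid(src));
        doInsertFrom(src, n);
    }
    virtual void doInsertFrom(const IColumnSketch & src, std::size_t n) = 0;
#endif
};
```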
@@ -145,7 +145,14 @@ public:
 
 void insertData(const char * pos, size_t length) override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & from, size_t n) override;
+#else
+using IColumn::insertFrom;
+
+void doInsertFrom(const IColumn & from, size_t n) override;
+#endif
+
 
 void insertFrom(ConstAggregateDataPtr place);
 
@@ -182,7 +189,11 @@ public:
 
 void protect() override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn & from, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & from, size_t start, size_t length) override;
+#endif
 
 void popBack(size_t n) override;
 
@@ -201,7 +212,11 @@ public:
 
 MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t, size_t, const IColumn &, int) const override
+#else
+int doCompareAt(size_t, size_t, const IColumn &, int) const override
+#endif
 {
 return 0;
 }
@@ -337,7 +337,11 @@ bool ColumnArray::tryInsert(const Field & x)
 return true;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnArray::insertFrom(const IColumn & src_, size_t n)
+#else
+void ColumnArray::doInsertFrom(const IColumn & src_, size_t n)
+#endif
 {
 const ColumnArray & src = assert_cast<const ColumnArray &>(src_);
 size_t size = src.sizeAt(n);
@@ -392,7 +396,11 @@ int ColumnArray::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int nan
 : 1);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int ColumnArray::compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
+#else
+int ColumnArray::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const
+#endif
 {
 return compareAtImpl(n, m, rhs_, nan_direction_hint);
 }
@@ -535,7 +543,11 @@ void ColumnArray::getExtremes(Field & min, Field & max) const
 }
 
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnArray::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnArray::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 if (length == 0)
 return;
@@ -84,10 +84,18 @@ public:
 void updateHashWithValue(size_t n, SipHash & hash) const override;
 void updateWeakHash32(WeakHash32 & hash) const override;
 void updateHashFast(SipHash & hash) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
 void insert(const Field & x) override;
 bool tryInsert(const Field & x) override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src_, size_t n) override;
+#else
+void doInsertFrom(const IColumn & src_, size_t n) override;
+#endif
 void insertDefault() override;
 void popBack(size_t n) override;
 ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
@@ -95,7 +103,11 @@ public:
 ColumnPtr permute(const Permutation & perm, size_t limit) const override;
 ColumnPtr index(const IColumn & indexes, size_t limit) const override;
 template <typename Type> ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
+#endif
 int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint, const Collator & collator) const override;
 void getPermutation(PermutationSortDirection direction, PermutationSortStability stability,
 size_t limit, int nan_direction_hint, Permutation & res) const override;
@@ -85,7 +85,11 @@ public:
 bool isDefaultAt(size_t) const override { throwMustBeDecompressed(); }
 void insert(const Field &) override { throwMustBeDecompressed(); }
 bool tryInsert(const Field &) override { throwMustBeDecompressed(); }
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
+#else
+void doInsertRangeFrom(const IColumn &, size_t, size_t) override { throwMustBeDecompressed(); }
+#endif
 void insertData(const char *, size_t) override { throwMustBeDecompressed(); }
 void insertDefault() override { throwMustBeDecompressed(); }
 void popBack(size_t) override { throwMustBeDecompressed(); }
@@ -100,7 +104,11 @@ public:
 void expand(const Filter &, bool) override { throwMustBeDecompressed(); }
 ColumnPtr permute(const Permutation &, size_t) const override { throwMustBeDecompressed(); }
 ColumnPtr index(const IColumn &, size_t) const override { throwMustBeDecompressed(); }
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
+#else
+int doCompareAt(size_t, size_t, const IColumn &, int) const override { throwMustBeDecompressed(); }
+#endif
 void compareColumn(const IColumn &, size_t, PaddedPODArray<UInt64> *, PaddedPODArray<Int8> &, int, int) const override
 {
 throwMustBeDecompressed();
@@ -32,6 +32,8 @@ private:
 ColumnConst(const ColumnConst & src) = default;
 
 public:
+bool isConst() const override { return true; }
+
 ColumnPtr convertToFullColumn() const;
 
 ColumnPtr convertToFullColumnIfConst() const override
@@ -121,7 +123,11 @@ public:
 return data->isNullAt(0);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
+#else
+void doInsertRangeFrom(const IColumn &, size_t /*start*/, size_t length) override
+#endif
 {
 s += length;
 }
@@ -145,12 +151,20 @@ public:
 ++s;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn &, size_t) override
+#else
+void doInsertFrom(const IColumn &, size_t) override
+#endif
 {
 ++s;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
+#else
+void doInsertManyFrom(const IColumn & /*src*/, size_t /* position */, size_t length) override { s += length; }
+#endif
 
 void insertDefault() override
 {
@@ -223,7 +237,11 @@ public:
 return data->allocatedBytes() + sizeof(s);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
+#else
+int doCompareAt(size_t, size_t, const IColumn & rhs, int nan_direction_hint) const override
+#endif
 {
 return data->compareAt(0, 0, *assert_cast<const ColumnConst &>(rhs).data, nan_direction_hint);
 }
@@ -32,7 +32,11 @@ namespace ErrorCodes
 }
 
 template <is_decimal T>
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int ColumnDecimal<T>::compareAt(size_t n, size_t m, const IColumn & rhs_, int) const
+#else
+int ColumnDecimal<T>::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int) const
+#endif
 {
 auto & other = static_cast<const Self &>(rhs_);
 const T & a = data[n];
@@ -331,7 +335,11 @@ void ColumnDecimal<T>::insertData(const char * src, size_t /*length*/)
 }
 
 template <is_decimal T>
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnDecimal<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnDecimal<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 const ColumnDecimal & src_vec = assert_cast<const ColumnDecimal &>(src);
 
@@ -55,9 +55,17 @@ public:
 void reserve(size_t n) override { data.reserve_exact(n); }
 void shrinkToFit() override { data.shrink_to_fit(); }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
+#else
+void doInsertFrom(const IColumn & src, size_t n) override { data.push_back(static_cast<const Self &>(src).getData()[n]); }
+#endif
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertManyFrom(const IColumn & src, size_t position, size_t length) override
+#else
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override
+#endif
 {
 ValueType v = assert_cast<const Self &>(src).getData()[position];
 data.resize_fill(data.size() + length, v);
@@ -68,7 +76,11 @@ public:
 void insertManyDefaults(size_t length) override { data.resize_fill(data.size() + length); }
 void insert(const Field & x) override { data.push_back(x.get<T>()); }
 bool tryInsert(const Field & x) override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
 
 void popBack(size_t n) override
 {
@@ -92,7 +104,11 @@ public:
 void updateHashWithValue(size_t n, SipHash & hash) const override;
 void updateWeakHash32(WeakHash32 & hash) const override;
 void updateHashFast(SipHash & hash) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override;
+#endif
 void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
 size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
 void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
@@ -4,7 +4,9 @@
 #include <DataTypes/DataTypeFactory.h>
 #include <DataTypes/DataTypeVariant.h>
 #include <DataTypes/DataTypeString.h>
+#include <DataTypes/DataTypeNothing.h>
 #include <DataTypes/FieldToDataType.h>
+#include <DataTypes/DataTypesBinaryEncoding.h>
 #include <Common/Arena.h>
 #include <Common/SipHash.h>
 #include <Processors/Transforms/ColumnGathererTransform.h>
@@ -213,7 +215,11 @@ bool ColumnDynamic::tryInsert(const DB::Field & x)
 }
 
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n)
+#else
+void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n)
+#endif
 {
 const auto & dynamic_src = assert_cast<const ColumnDynamic &>(src_);
 
@@ -263,7 +269,11 @@ void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n)
 variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
+#else
+void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length)
+#endif
 {
 if (start + length > src_.size())
 throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Parameter out of bound in ColumnDynamic::insertRangeFrom method. "
@@ -429,7 +439,11 @@ void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size
 }
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
+#else
+void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
+#endif
 {
 const auto & dynamic_src = assert_cast<const ColumnDynamic &>(src_);
 
@@ -481,7 +495,7 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co
 /// We cannot use Variant serialization here as it serializes discriminator + value,
 /// but Dynamic doesn't have fixed mapping discriminator <-> variant type
 /// as different Dynamic column can have different Variants.
-/// Instead, we serialize null bit + variant type name (size + bytes) + value.
+/// Instead, we serialize null bit + variant type in binary format (size + bytes) + value.
 const auto & variant_col = assert_cast<const ColumnVariant &>(*variant_column);
 auto discr = variant_col.globalDiscriminatorAt(n);
 StringRef res;
@@ -495,14 +509,15 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co
 return res;
 }
 
-const auto & variant_name = variant_info.variant_names[discr];
-size_t variant_name_size = variant_name.size();
-char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_name.size(), begin);
+const auto & variant_type = assert_cast<const DataTypeVariant &>(*variant_info.variant_type).getVariant(discr);
+String variant_type_binary_data = encodeDataType(variant_type);
+size_t variant_type_binary_data_size = variant_type_binary_data.size();
+char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(), begin);
 memcpy(pos, &null_bit, sizeof(UInt8));
-memcpy(pos + sizeof(UInt8), &variant_name_size, sizeof(size_t));
-memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_name.data(), variant_name.size());
+memcpy(pos + sizeof(UInt8), &variant_type_binary_data_size, sizeof(size_t));
+memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_type_binary_data.data(), variant_type_binary_data.size());
 res.data = pos;
-res.size = sizeof(UInt8) + sizeof(size_t) + variant_name.size();
+res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size();
 
 auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin);
 res.data = value_ref.data - res.size;
@@ -521,13 +536,15 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)
 return pos;
 }
 
-/// Read variant type name.
-const size_t variant_name_size = unalignedLoad<size_t>(pos);
-pos += sizeof(variant_name_size);
-String variant_name;
-variant_name.resize(variant_name_size);
-memcpy(variant_name.data(), pos, variant_name_size);
-pos += variant_name_size;
+/// Read variant type in binary format.
+const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
+pos += sizeof(variant_type_binary_data_size);
+String variant_type_binary_data;
+variant_type_binary_data.resize(variant_type_binary_data_size);
+memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
+pos += variant_type_binary_data_size;
+auto variant_type = decodeDataType(variant_type_binary_data);
+auto variant_name = variant_type->getName();
 /// If we already have such variant, just deserialize it into corresponding variant column.
 auto it = variant_info.variant_name_to_discriminator.find(variant_name);
 if (it != variant_info.variant_name_to_discriminator.end())
@@ -537,7 +554,6 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos)
 }
 
 /// If we don't have such variant, add it.
-auto variant_type = DataTypeFactory::instance().get(variant_name);
 if (likely(addNewVariant(variant_type)))
 {
 auto discr = variant_info.variant_name_to_discriminator[variant_name];
@@ -563,13 +579,13 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const
 if (null_bit)
 return pos;
 
-const size_t variant_name_size = unalignedLoad<size_t>(pos);
-pos += sizeof(variant_name_size);
-String variant_name;
-variant_name.resize(variant_name_size);
-memcpy(variant_name.data(), pos, variant_name_size);
-pos += variant_name_size;
-auto tmp_variant_column = DataTypeFactory::instance().get(variant_name)->createColumn();
+const size_t variant_type_binary_data_size = unalignedLoad<size_t>(pos);
+pos += sizeof(variant_type_binary_data_size);
+String variant_type_binary_data;
+variant_type_binary_data.resize(variant_type_binary_data_size);
+memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size);
+pos += variant_type_binary_data_size;
+auto tmp_variant_column = decodeDataType(variant_type_binary_data)->createColumn();
 return tmp_variant_column->skipSerializedInArena(pos);
 }
 
|
|||||||
variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash);
|
variant_col.getVariantByGlobalDiscriminator(discr).updateHashWithValue(variant_col.offsetAt(n), hash);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
|
int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
|
||||||
|
#else
|
||||||
|
int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const
|
||||||
|
#endif
|
||||||
{
|
{
|
||||||
const auto & left_variant = assert_cast<const ColumnVariant &>(*variant_column);
|
const auto & left_variant = assert_cast<const ColumnVariant &>(*variant_column);
|
||||||
const auto & right_dynamic = assert_cast<const ColumnDynamic &>(rhs);
|
const auto & right_dynamic = assert_cast<const ColumnDynamic &>(rhs);
|
||||||
|
@ -142,9 +142,16 @@ public:
|
|||||||
|
|
||||||
void insert(const Field & x) override;
|
void insert(const Field & x) override;
|
||||||
bool tryInsert(const Field & x) override;
|
bool tryInsert(const Field & x) override;
|
||||||
|
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
void insertFrom(const IColumn & src_, size_t n) override;
|
void insertFrom(const IColumn & src_, size_t n) override;
|
||||||
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
||||||
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
||||||
|
#else
|
||||||
|
void doInsertFrom(const IColumn & src_, size_t n) override;
|
||||||
|
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
||||||
|
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
||||||
|
#endif
|
||||||
|
|
||||||
void insertDefault() override
|
void insertDefault() override
|
||||||
{
|
{
|
||||||
@ -213,7 +220,11 @@ public:
|
|||||||
return scattered_columns;
|
return scattered_columns;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
|
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
|
||||||
|
#else
|
||||||
|
int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
|
||||||
|
#endif
|
||||||
|
|
||||||
bool hasEqualValues() const override
|
bool hasEqualValues() const override
|
||||||
{
|
{
|
||||||
|
@@ -74,7 +74,11 @@ bool ColumnFixedString::tryInsert(const Field & x)
 return true;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnFixedString::insertFrom(const IColumn & src_, size_t index)
+#else
+void ColumnFixedString::doInsertFrom(const IColumn & src_, size_t index)
+#endif
 {
 const ColumnFixedString & src = assert_cast<const ColumnFixedString &>(src_);
 
@@ -86,7 +90,11 @@ void ColumnFixedString::insertFrom(const IColumn & src_, size_t index)
 memcpySmallAllowReadWriteOverflow15(chars.data() + old_size, &src.chars[n * index], n);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnFixedString::insertManyFrom(const IColumn & src, size_t position, size_t length)
+#else
+void ColumnFixedString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+#endif
 {
 const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
 if (n != src_concrete.getN())
@@ -219,7 +227,11 @@ size_t ColumnFixedString::estimateCardinalityInPermutedRange(const Permutation &
 return elements.size();
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnFixedString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
 chassert(this->n == src_concrete.n);
@@ -98,9 +98,17 @@ public:
 
 bool tryInsert(const Field & x) override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src_, size_t index) override;
+#else
+void doInsertFrom(const IColumn & src_, size_t index) override;
+#endif
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#else
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#endif
 
 void insertData(const char * pos, size_t length) override;
 
@@ -129,7 +137,11 @@ public:
 
 void updateHashFast(SipHash & hash) const override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
+#else
+int doCompareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
+#endif
 {
 const ColumnFixedString & rhs = assert_cast<const ColumnFixedString &>(rhs_);
 chassert(this->n == rhs.n);
@@ -144,7 +156,11 @@ public:
 
 size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
 
 ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;
 
@@ -72,7 +72,11 @@ ColumnPtr ColumnFunction::cut(size_t start, size_t length) const
 return ColumnFunction::create(length, function, capture, is_short_circuit_argument, is_function_compiled);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnFunction::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnFunction::doInsertFrom(const IColumn & src, size_t n)
+#endif
 {
 const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);
 
@@ -89,7 +93,11 @@ void ColumnFunction::insertFrom(const IColumn & src, size_t n)
 ++elements_size;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnFunction::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnFunction::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 const ColumnFunction & src_func = assert_cast<const ColumnFunction &>(src);
 
@@ -94,8 +94,16 @@ public:
 throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot insert into {}", getName());
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src, size_t n) override;
+#else
+void doInsertFrom(const IColumn & src, size_t n) override;
+#endif
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn &, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn &, size_t start, size_t length) override;
+#endif
 
 void insertData(const char *, size_t) override
 {
@@ -137,7 +145,11 @@ public:
 throw Exception(ErrorCodes::NOT_IMPLEMENTED, "popBack is not implemented for {}", getName());
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t, size_t, const IColumn &, int) const override
+#else
+int doCompareAt(size_t, size_t, const IColumn &, int) const override
+#endif
 {
 throw Exception(ErrorCodes::NOT_IMPLEMENTED, "compareAt is not implemented for {}", getName());
 }
@@ -159,7 +159,11 @@ void ColumnLowCardinality::insertDefault()
 idx.insertPosition(getDictionary().getDefaultValueIndex());
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnLowCardinality::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnLowCardinality::doInsertFrom(const IColumn & src, size_t n)
+#endif
 {
 const auto * low_cardinality_src = typeid_cast<const ColumnLowCardinality *>(&src);
 
@@ -187,7 +191,11 @@ void ColumnLowCardinality::insertFromFullColumn(const IColumn & src, size_t n)
 idx.insertPosition(getDictionary().uniqueInsertFrom(src, n));
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnLowCardinality::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnLowCardinality::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 const auto * low_cardinality_src = typeid_cast<const ColumnLowCardinality *>(&src);
 
@@ -364,7 +372,11 @@ int ColumnLowCardinality::compareAtImpl(size_t n, size_t m, const IColumn & rhs,
 return getDictionary().compareAt(n_index, m_index, low_cardinality_column.getDictionary(), nan_direction_hint);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int ColumnLowCardinality::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#else
+int ColumnLowCardinality::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#endif
 {
 return compareAtImpl(n, m, rhs, nan_direction_hint);
 }
@@ -78,10 +78,18 @@ public:
 bool tryInsert(const Field & x) override;
 void insertDefault() override;
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src, size_t n) override;
+#else
+void doInsertFrom(const IColumn & src, size_t n) override;
+#endif
 void insertFromFullColumn(const IColumn & src, size_t n);
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
 void insertRangeFromFullColumn(const IColumn & src, size_t start, size_t length);
 void insertRangeFromDictionaryEncodedColumn(const IColumn & keys, const IColumn & positions);
 
@@ -127,7 +135,11 @@ public:
 return ColumnLowCardinality::create(dictionary.getColumnUniquePtr(), getIndexes().index(indexes_, limit));
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#endif
 
 int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator &) const override;
 
@@ -153,17 +153,29 @@ void ColumnMap::updateHashFast(SipHash & hash) const
 nested->updateHashFast(hash);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnMap::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnMap::doInsertFrom(const IColumn & src, size_t n)
+#endif
 {
 nested->insertFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), n);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnMap::insertManyFrom(const IColumn & src, size_t position, size_t length)
+#else
+void ColumnMap::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+#endif
 {
 assert_cast<ColumnArray &>(*nested).insertManyFrom(assert_cast<const ColumnMap &>(src).getNestedColumn(), position, length);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnMap::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnMap::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 nested->insertRangeFrom(
 assert_cast<const ColumnMap &>(src).getNestedColumn(),
@@ -210,7 +222,11 @@ MutableColumns ColumnMap::scatter(ColumnIndex num_columns, const Selector & sele
 return res;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int ColumnMap::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#else
+int ColumnMap::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#endif
 {
 const auto & rhs_map = assert_cast<const ColumnMap &>(rhs);
 return nested->compareAt(n, m, rhs_map.getNestedColumn(), nan_direction_hint);
@@ -66,16 +66,28 @@ public:
 void updateHashWithValue(size_t n, SipHash & hash) const override;
 void updateWeakHash32(WeakHash32 & hash) const override;
 void updateHashFast(SipHash & hash) const override;
+
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void insertFrom(const IColumn & src_, size_t n) override;
 void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
 void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertFrom(const IColumn & src_, size_t n) override;
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
+
 ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
 void expand(const Filter & mask, bool inverted) override;
 ColumnPtr permute(const Permutation & perm, size_t limit) const override;
 ColumnPtr index(const IColumn & indexes, size_t limit) const override;
 ColumnPtr replicate(const Offsets & offsets) const override;
 MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#endif
 void getExtremes(Field & min, Field & max) const override;
 void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
 size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override;
@@ -221,7 +221,11 @@ const char * ColumnNullable::skipSerializedInArena(const char * pos) const
 return pos;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnNullable::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnNullable::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
 {
 const ColumnNullable & nullable_col = assert_cast<const ColumnNullable &>(src);
 getNullMapColumn().insertRangeFrom(*nullable_col.null_map, start, length);
@@ -258,7 +262,11 @@ bool ColumnNullable::tryInsert(const Field & x)
 return true;
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnNullable::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnNullable::doInsertFrom(const IColumn & src, size_t n)
+#endif
 {
 const ColumnNullable & src_concrete = assert_cast<const ColumnNullable &>(src);
 getNestedColumn().insertFrom(src_concrete.getNestedColumn(), n);
@@ -266,7 +274,11 @@ void ColumnNullable::insertFrom(const IColumn & src, size_t n)
 }
 
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 void ColumnNullable::insertManyFrom(const IColumn & src, size_t position, size_t length)
+#else
+void ColumnNullable::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+#endif
 {
 const ColumnNullable & src_concrete = assert_cast<const ColumnNullable &>(src);
 getNestedColumn().insertManyFrom(src_concrete.getNestedColumn(), position, length);
@@ -402,7 +414,11 @@ int ColumnNullable::compareAtImpl(size_t n, size_t m, const IColumn & rhs_, int
 return getNestedColumn().compareAt(n, m, nested_rhs, null_direction_hint);
 }
 
+#if !defined(ABORT_ON_LOGICAL_ERROR)
 int ColumnNullable::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
+#else
+int ColumnNullable::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
+#endif
 {
 return compareAtImpl(n, m, rhs_, null_direction_hint);
 }
@ -69,11 +69,21 @@ public:
|
|||||||
char * serializeValueIntoMemory(size_t n, char * memory) const override;
|
char * serializeValueIntoMemory(size_t n, char * memory) const override;
|
||||||
const char * deserializeAndInsertFromArena(const char * pos) override;
|
const char * deserializeAndInsertFromArena(const char * pos) override;
|
||||||
const char * skipSerializedInArena(const char * pos) const override;
|
const char * skipSerializedInArena(const char * pos) const override;
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
||||||
|
#else
|
||||||
|
void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
|
||||||
|
#endif
|
||||||
void insert(const Field & x) override;
|
void insert(const Field & x) override;
|
||||||
bool tryInsert(const Field & x) override;
|
bool tryInsert(const Field & x) override;
|
||||||
|
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
void insertFrom(const IColumn & src, size_t n) override;
|
void insertFrom(const IColumn & src, size_t n) override;
|
||||||
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
||||||
|
#else
|
||||||
|
void doInsertFrom(const IColumn & src, size_t n) override;
|
||||||
|
void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
|
||||||
|
#endif
|
||||||
|
|
||||||
void insertFromNotNullable(const IColumn & src, size_t n);
|
void insertFromNotNullable(const IColumn & src, size_t n);
|
||||||
void insertRangeFromNotNullable(const IColumn & src, size_t start, size_t length);
|
void insertRangeFromNotNullable(const IColumn & src, size_t start, size_t length);
|
||||||
@ -90,7 +100,11 @@ public:
|
|||||||
void expand(const Filter & mask, bool inverted) override;
|
void expand(const Filter & mask, bool inverted) override;
|
||||||
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
|
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
|
||||||
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
|
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
|
||||||
|
#if !defined(ABORT_ON_LOGICAL_ERROR)
|
||||||
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
|
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
|
||||||
|
#else
|
||||||
|
int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
|
||||||
|
#endif
|
||||||
|
|
||||||
#if USE_EMBEDDED_COMPILER
|
#if USE_EMBEDDED_COMPILER
|
||||||
|
|
||||||
|
@@ -763,12 +763,20 @@ void ColumnObject::get(size_t n, Field & res) const
}
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnObject::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnObject::doInsertFrom(const IColumn & src, size_t n)
+#endif
{
insert(src[n]);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnObject::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnObject::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
{
const auto & src_object = assert_cast<const ColumnObject &>(src);

@@ -209,8 +209,15 @@ public:
void insert(const Field & field) override;
bool tryInsert(const Field & field) override;
void insertDefault() override;

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertFrom(const IColumn & src, size_t n) override;
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif

void popBack(size_t length) override;
Field operator[](size_t n) const override;
void get(size_t n, Field & res) const override;

@@ -228,7 +235,11 @@ public:
/// Order of rows in ColumnObject is undefined.
void getPermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation & res) const override;
void updatePermutation(PermutationSortDirection, PermutationSortStability, size_t, int, Permutation &, EqualRanges &) const override {}
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
+#else
+int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
+#endif
void getExtremes(Field & min, Field & max) const override;

/// All other methods throw exception.
@@ -174,7 +174,11 @@ const char * ColumnSparse::skipSerializedInArena(const char * pos) const
return values->skipSerializedInArena(pos);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnSparse::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnSparse::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
{
if (length == 0)
return;

@@ -248,7 +252,11 @@ bool ColumnSparse::tryInsert(const Field & x)
return true;
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnSparse::insertFrom(const IColumn & src, size_t n)
+#else
+void ColumnSparse::doInsertFrom(const IColumn & src, size_t n)
+#endif
{
if (const auto * src_sparse = typeid_cast<const ColumnSparse *>(&src))
{

@@ -446,7 +454,11 @@ ColumnPtr ColumnSparse::indexImpl(const PaddedPODArray<Type> & indexes, size_t l
return ColumnSparse::create(std::move(res_values), std::move(res_offsets), limit);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnSparse::compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
+#else
+int ColumnSparse::doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const
+#endif
{
if (const auto * rhs_sparse = typeid_cast<const ColumnSparse *>(&rhs_))
return values->compareAt(getValueIndex(n), rhs_sparse->getValueIndex(m), rhs_sparse->getValuesColumn(), null_direction_hint);

@@ -81,10 +81,18 @@ public:
char * serializeValueIntoMemory(size_t n, char * memory) const override;
const char * deserializeAndInsertFromArena(const char * pos) override;
const char * skipSerializedInArena(const char *) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override;
+#else
+void doInsertFrom(const IColumn & src, size_t n) override;
+#endif
void insertDefault() override;
void insertManyDefaults(size_t length) override;

@@ -98,7 +106,11 @@ public:
template <typename Type>
ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int null_direction_hint) const override;
+#endif
void compareColumn(const IColumn & rhs, size_t rhs_row_num,
PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
int direction, int nan_direction_hint) const override;
@@ -39,7 +39,11 @@ ColumnString::ColumnString(const ColumnString & src)
last_offset, chars.size());
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnString::insertManyFrom(const IColumn & src, size_t position, size_t length)
+#else
+void ColumnString::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+#endif
{
const ColumnString & src_concrete = assert_cast<const ColumnString &>(src);
const UInt8 * src_buf = &src_concrete.chars[src_concrete.offsets[position - 1]];

@@ -129,7 +133,11 @@ void ColumnString::updateWeakHash32(WeakHash32 & hash) const
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnString::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
{
if (length == 0)
return;

@@ -142,7 +142,11 @@ public:
return true;
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override
+#else
+void doInsertFrom(const IColumn & src_, size_t n) override
+#endif
{
const ColumnString & src = assert_cast<const ColumnString &>(src_);
const size_t size_to_append = src.offsets[n] - src.offsets[n - 1]; /// -1th index is Ok, see PaddedPODArray.

@@ -165,7 +169,11 @@ public:
}
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#else
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#endif

void insertData(const char * pos, size_t length) override
{

@@ -212,7 +220,11 @@ public:
hash.update(reinterpret_cast<const char *>(chars.data()), chars.size() * sizeof(chars[0]));
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif

ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;

@@ -238,7 +250,11 @@ public:
offsets.push_back(offsets.back() + 1);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int /*nan_direction_hint*/) const override
+#endif
{
const ColumnString & rhs = assert_cast<const ColumnString &>(rhs_);
return memcmpSmallAllowOverflow15(chars.data() + offsetAt(n), sizeAt(n) - 1, rhs.chars.data() + rhs.offsetAt(m), rhs.sizeAt(m) - 1);
@@ -205,7 +205,11 @@ bool ColumnTuple::tryInsert(const Field & x)
return true;
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
+#else
+void ColumnTuple::doInsertFrom(const IColumn & src_, size_t n)
+#endif
{
const ColumnTuple & src = assert_cast<const ColumnTuple &>(src_);

@@ -218,7 +222,11 @@ void ColumnTuple::insertFrom(const IColumn & src_, size_t n)
columns[i]->insertFrom(*src.columns[i], n);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertManyFrom(const IColumn & src, size_t position, size_t length)
+#else
+void ColumnTuple::doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+#endif
{
const ColumnTuple & src_tuple = assert_cast<const ColumnTuple &>(src);

@@ -318,7 +326,11 @@ void ColumnTuple::updateHashFast(SipHash & hash) const
column->updateHashFast(hash);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnTuple::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnTuple::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
{
column_length += length;
const size_t tuple_size = columns.size();

@@ -470,7 +482,11 @@ int ColumnTuple::compareAtImpl(size_t n, size_t m, const IColumn & rhs, int nan_
return 0;
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnTuple::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#else
+int ColumnTuple::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#endif
{
return compareAtImpl(n, m, rhs, nan_direction_hint);
}

@@ -65,8 +65,15 @@ public:
void insertData(const char * pos, size_t length) override;
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#else
+void doInsertFrom(const IColumn & src_, size_t n) override;
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override;
+#endif

void insertDefault() override;
void popBack(size_t n) override;
StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;

@@ -76,14 +83,22 @@ public:
void updateHashWithValue(size_t n, SipHash & hash) const override;
void updateWeakHash32(WeakHash32 & hash) const override;
void updateHashFast(SipHash & hash) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif
ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override;
void expand(const Filter & mask, bool inverted) override;
ColumnPtr permute(const Permutation & perm, size_t limit) const override;
ColumnPtr index(const IColumn & indexes, size_t limit) const override;
ColumnPtr replicate(const Offsets & offsets) const override;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#endif
int compareAtWithCollation(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint, const Collator & collator) const override;
void getExtremes(Field & min, Field & max) const override;
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
@@ -90,7 +90,11 @@ public:
return getNestedColumn()->updateHashWithValue(n, hash_func);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#endif

void getExtremes(Field & min, Field & max) const override { column_holder->getExtremes(min, max); }
bool valuesHaveFixedSize() const override { return column_holder->valuesHaveFixedSize(); }

@@ -488,7 +492,11 @@ const char * ColumnUnique<ColumnType>::skipSerializedInArena(const char *) const
}

template <typename ColumnType>
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnUnique<ColumnType>::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#else
+int ColumnUnique<ColumnType>::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#endif
{
if (is_nullable)
{
@@ -595,17 +595,29 @@ void ColumnVariant::insertManyFromImpl(const DB::IColumn & src_, size_t position
}
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertFrom(const IColumn & src_, size_t n)
+#else
+void ColumnVariant::doInsertFrom(const IColumn & src_, size_t n)
+#endif
{
insertFromImpl(src_, n, nullptr);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length)
+#else
+void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length)
+#endif
{
insertRangeFromImpl(src_, start, length, nullptr);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
+#else
+void ColumnVariant::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length)
+#endif
{
insertManyFromImpl(src_, position, length, nullptr);
}

@@ -1174,7 +1186,11 @@ bool ColumnVariant::hasEqualValues() const
return local_discriminators->hasEqualValues() && variants[localDiscriminatorAt(0)]->hasEqualValues();
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
int ColumnVariant::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#else
+int ColumnVariant::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+#endif
{
const auto & rhs_variant = assert_cast<const ColumnVariant &>(rhs);
Discriminator left_discr = globalDiscriminatorAt(n);

@@ -180,9 +180,19 @@ public:
void insert(const Field & x) override;
bool tryInsert(const Field & x) override;

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src_, size_t n) override;
void insertRangeFrom(const IColumn & src_, size_t start, size_t length) override;
void insertManyFrom(const IColumn & src_, size_t position, size_t length) override;
+#else
+using IColumn::insertFrom;
+using IColumn::insertManyFrom;
+using IColumn::insertRangeFrom;
+
+void doInsertFrom(const IColumn & src_, size_t n) override;
+void doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) override;
+void doInsertManyFrom(const IColumn & src_, size_t position, size_t length) override;
+#endif

/// Methods for insertion from another Variant but with known mapping between global discriminators.
void insertFrom(const IColumn & src_, size_t n, const std::vector<ColumnVariant::Discriminator> & global_discriminators_mapping);

@@ -213,7 +223,11 @@ public:
ColumnPtr indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const;
ColumnPtr replicate(const Offsets & replicate_offsets) const override;
MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override;
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const override;
+#endif
bool hasEqualValues() const override;
void getExtremes(Field & min, Field & max) const override;
void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability,
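Note on the `using IColumn::insertFrom;` / `insertManyFrom` / `insertRangeFrom` declarations in the #else branch of the ColumnVariant.h hunk above: ColumnVariant also declares its own insertFrom/insertRangeFrom/insertManyFrom overloads that take a global-discriminators mapping, and in the ABORT_ON_LOGICAL_ERROR build the plain overrides are renamed to doInsertFrom and friends. Without the using-declarations, those extra overloads would hide the non-virtual wrappers inherited from IColumn (ordinary C++ name hiding). A minimal standalone sketch of that rule, with illustrative names rather than ClickHouse's classes:

#include <cstddef>
#include <iostream>

struct Base
{
    virtual ~Base() = default;

    // Non-virtual entry point, like IColumn::insertFrom in the checked build.
    void insertFrom(size_t n) { doInsertFrom(n); }

protected:
    virtual void doInsertFrom(size_t n) = 0;
};

struct Derived : Base
{
    // Without this using-declaration the two-argument overload below would
    // hide Base::insertFrom(size_t) completely.
    using Base::insertFrom;

    // Extra overload, analogous to the discriminator-mapping variants.
    void insertFrom(size_t n, size_t offset) { doInsertFrom(n + offset); }

protected:
    void doInsertFrom(size_t n) override { std::cout << "insert row " << n << '\n'; }
};

int main()
{
    Derived d;
    d.insertFrom(1);     // resolves to Base::insertFrom via the using-declaration
    d.insertFrom(1, 2);  // resolves to the Derived overload
}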
@@ -503,7 +503,11 @@ bool ColumnVector<T>::tryInsert(const DB::Field & x)
}

template <typename T>
+#if !defined(ABORT_ON_LOGICAL_ERROR)
void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t length)
+#else
+void ColumnVector<T>::doInsertRangeFrom(const IColumn & src, size_t start, size_t length)
+#endif
{
const ColumnVector & src_vec = assert_cast<const ColumnVector &>(src);

@@ -64,12 +64,20 @@ public:
return data.size();
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn & src, size_t n) override
+#else
+void doInsertFrom(const IColumn & src, size_t n) override
+#endif
{
data.push_back(assert_cast<const Self &>(src).getData()[n]);
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertManyFrom(const IColumn & src, size_t position, size_t length) override
+#else
+void doInsertManyFrom(const IColumn & src, size_t position, size_t length) override
+#endif
{
ValueType v = assert_cast<const Self &>(src).getData()[position];
data.resize_fill(data.size() + length, v);

@@ -142,7 +150,11 @@ public:
}

/// This method implemented in header because it could be possibly devirtualized.
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override
+#else
+int doCompareAt(size_t n, size_t m, const IColumn & rhs_, int nan_direction_hint) const override
+#endif
{
return CompareHelper<T>::compare(data[n], assert_cast<const Self &>(rhs_).data[m], nan_direction_hint);
}
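The compareAt body above stays in the header so the call through CompareHelper<T>::compare can be inlined or devirtualized; nan_direction_hint tells the comparator where NaNs should sort relative to ordinary values. A simplified sketch of one plausible three-way-compare semantics for that hint (an assumption for illustration, not the actual CompareHelper implementation):

#include <cassert>
#include <cmath>

// Three-way compare for doubles. Assumed convention: nan_direction_hint = +1
// sorts NaNs after all numbers, -1 sorts them before.
static int compare_with_nan_hint(double a, double b, int nan_direction_hint)
{
    const bool a_nan = std::isnan(a);
    const bool b_nan = std::isnan(b);
    if (a_nan && b_nan)
        return 0;
    if (a_nan)
        return nan_direction_hint;
    if (b_nan)
        return -nan_direction_hint;
    return a < b ? -1 : (a > b ? 1 : 0);
}

int main()
{
    assert(compare_with_nan_hint(1.0, 2.0, 1) == -1);
    assert(compare_with_nan_hint(NAN, 2.0, 1) == 1);    // NaN sorts last
    assert(compare_with_nan_hint(NAN, 2.0, -1) == -1);  // NaN sorts first
    return 0;
}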
@@ -228,7 +240,11 @@ public:

bool tryInsert(const DB::Field & x) override;

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#else
+void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) override;
+#endif

ColumnPtr filter(const IColumn::Filter & filt, ssize_t result_size_hint) const override;

@@ -46,7 +46,11 @@ String IColumn::dumpStructure() const
return res.str();
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void IColumn::insertFrom(const IColumn & src, size_t n)
+#else
+void IColumn::doInsertFrom(const IColumn & src, size_t n)
+#endif
{
insert(src[n]);
}
@@ -1,15 +1,14 @@
#pragma once

-#include <Common/COW.h>
-#include <Common/PODArray_fwd.h>
-#include <Common/Exception.h>
-#include <Common/typeid_cast.h>
-#include <base/StringRef.h>
#include <Core/TypeId.h>
+#include <base/StringRef.h>
+#include <Common/COW.h>
+#include <Common/Exception.h>
+#include <Common/PODArray_fwd.h>
+#include <Common/typeid_cast.h>

#include "config.h"


class SipHash;
class Collator;

@@ -180,18 +179,42 @@ public:

/// Appends n-th element from other column with the same type.
/// Is used in merge-sort and merges. It could be implemented in inherited classes more optimally than default implementation.
+#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertFrom(const IColumn & src, size_t n);
+#else
+void insertFrom(const IColumn & src, size_t n)
+{
+assertTypeEquality(src);
+doInsertFrom(src, n);
+}
+#endif

/// Appends range of elements from other column with the same type.
/// Could be used to concatenate columns.
+#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
+#else
+void insertRangeFrom(const IColumn & src, size_t start, size_t length)
+{
+assertTypeEquality(src);
+doInsertRangeFrom(src, start, length);
+}
+#endif

/// Appends one element from other column with the same type multiple times.
+#if !defined(ABORT_ON_LOGICAL_ERROR)
virtual void insertManyFrom(const IColumn & src, size_t position, size_t length)
{
for (size_t i = 0; i < length; ++i)
insertFrom(src, position);
}
+#else
+void insertManyFrom(const IColumn & src, size_t position, size_t length)
+{
+assertTypeEquality(src);
+doInsertManyFrom(src, position, length);
+}
+#endif

/// Appends one field multiple times. Can be optimized in inherited classes.
virtual void insertMany(const Field & field, size_t length)

@@ -322,7 +345,15 @@ public:
*
* For non Nullable and non floating point types, nan_direction_hint is ignored.
*/
+#if !defined(ABORT_ON_LOGICAL_ERROR)
[[nodiscard]] virtual int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
+#else
+[[nodiscard]] int compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const
+{
+assertTypeEquality(rhs);
+return doCompareAt(n, m, rhs, nan_direction_hint);
+}
+#endif

#if USE_EMBEDDED_COMPILER

@@ -610,6 +641,8 @@ public:

[[nodiscard]] virtual bool isSparse() const { return false; }

+[[nodiscard]] virtual bool isConst() const { return false; }
+
[[nodiscard]] virtual bool isCollationSupported() const { return false; }

virtual ~IColumn() = default;

@@ -633,6 +666,29 @@ protected:
Equals equals,
Sort full_sort,
PartialSort partial_sort) const;
+
+#if defined(ABORT_ON_LOGICAL_ERROR)
+virtual void doInsertFrom(const IColumn & src, size_t n);
+
+virtual void doInsertRangeFrom(const IColumn & src, size_t start, size_t length) = 0;
+
+virtual void doInsertManyFrom(const IColumn & src, size_t position, size_t length)
+{
+for (size_t i = 0; i < length; ++i)
+insertFrom(src, position);
+}
+
+virtual int doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const = 0;
+
+private:
+void assertTypeEquality(const IColumn & rhs) const
+{
+/// For Sparse and Const columns, we can compare only internal types. It is considered normal to e.g. insert from normal vector column to a sparse vector column.
+/// This case is specifically handled in ColumnSparse implementation. Similar situation with Const column.
+/// For the rest of column types we can compare the types directly.
+chassert((isConst() || isSparse()) ? getDataType() == rhs.getDataType() : typeid(*this) == typeid(rhs));
+}
+#endif
};

using ColumnPtr = IColumn::Ptr;
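The two IColumn.h hunks above are the core of the change: in ABORT_ON_LOGICAL_ERROR builds, the public insertFrom/insertRangeFrom/insertManyFrom/compareAt become non-virtual wrappers that run assertTypeEquality and then forward to the protected virtual do* hooks, with Sparse and Const columns exempted from the strict typeid check because inserting from a plain column into them is legitimate. A minimal standalone sketch of the same wrapper pattern (illustrative names, not the actual IColumn interface):

#include <cassert>
#include <cstddef>
#include <iostream>
#include <typeinfo>
#include <vector>

struct Column
{
    virtual ~Column() = default;
    virtual bool isSparse() const { return false; }

    // Public entry point: validate the argument, then dispatch to the hook.
    void insertFrom(const Column & src, size_t n)
    {
        assertTypeEquality(src);
        doInsertFrom(src, n);
    }

protected:
    virtual void doInsertFrom(const Column & src, size_t n) = 0;

private:
    void assertTypeEquality(const Column & rhs) const
    {
        // Sparse (and constant) columns may legitimately receive values from a
        // plain column, so the strict same-dynamic-type check is relaxed for them.
        assert(isSparse() || typeid(*this) == typeid(rhs));
    }
};

struct Int64Column : Column
{
    std::vector<long long> data;

protected:
    void doInsertFrom(const Column & src, size_t n) override
    {
        // The wrapper already verified that src has the same dynamic type.
        data.push_back(static_cast<const Int64Column &>(src).data[n]);
    }
};

int main()
{
    Int64Column a, b;
    b.data = {10, 20, 30};
    a.insertFrom(b, 1);
    std::cout << a.data.back() << '\n'; // prints 20
}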
@@ -26,7 +26,11 @@ public:
size_t byteSize() const override { return 0; }
size_t byteSizeAt(size_t) const override { return 0; }
size_t allocatedBytes() const override { return 0; }
+#if !defined(ABORT_ON_LOGICAL_ERROR)
int compareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
+#else
+int doCompareAt(size_t, size_t, const IColumn &, int) const override { return 0; }
+#endif
void compareColumn(const IColumn &, size_t, PaddedPODArray<UInt64> *, PaddedPODArray<Int8> &, int, int) const override
{
}

@@ -67,12 +71,20 @@ public:
{
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertFrom(const IColumn &, size_t) override
+#else
+void doInsertFrom(const IColumn &, size_t) override
+#endif
{
++s;
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
+#else
+void doInsertRangeFrom(const IColumn & /*src*/, size_t /*start*/, size_t length) override
+#endif
{
s += length;
}

@@ -85,7 +85,11 @@ public:
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryInsert is not supported for ColumnUnique.");
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
void insertRangeFrom(const IColumn &, size_t, size_t) override
+#else
+void doInsertRangeFrom(const IColumn &, size_t, size_t) override
+#endif
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method insertRangeFrom is not supported for ColumnUnique.");
}

@@ -52,7 +52,11 @@ static ColumnPtr mockColumn(const DataTypePtr & type, size_t rows)
}

+#if !defined(ABORT_ON_LOGICAL_ERROR)
static NO_INLINE void insertManyFrom(IColumn & dst, const IColumn & src)
+#else
+static NO_INLINE void doInsertManyFrom(IColumn & dst, const IColumn & src)
+#endif
{
size_t size = src.size();
dst.insertManyFrom(src, size / 2, size);
@@ -1,5 +1,7 @@
#pragma once

+#include <atomic>
+#include <mutex>
#include <memory>

#include <base/types.h>

@@ -1,5 +1,6 @@
#pragma once

+#include <atomic>
#include <list>
#include <memory>
#include <mutex>
@@ -38,10 +38,19 @@ namespace ErrorCodes
extern const int CANNOT_MREMAP;
}

+void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size)
+{
+auto & logger = Poco::Logger::root();
+LOG_FATAL(&logger, "Logical error: '{}'.", description);
+if (trace)
+LOG_FATAL(&logger, "Stack trace (when copying this message, always include the lines below):\n\n{}", StackTrace::toString(trace, trace_offset, trace_size));
+abort();
+}
+
void abortOnFailedAssertion(const String & description)
{
-LOG_FATAL(&Poco::Logger::root(), "Logical error: '{}'.", description);
-abort();
+StackTrace st;
+abortOnFailedAssertion(description, st.getFramePointers().data(), st.getOffset(), st.getSize());
}

bool terminate_on_any_exception = false;
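The new overload above lets callers pass an already-captured trace (frame pointers plus offset and size) so the stack that led to the logical error is logged before abort(), while the old single-argument overload now just captures a StackTrace and delegates. A minimal sketch of the same delegation idea using the glibc backtrace API instead of ClickHouse's StackTrace class (Linux/glibc only, illustrative names):

#include <cstdio>
#include <cstdlib>
#include <execinfo.h>

// Log a description and an already-captured trace, then abort.
[[noreturn]] static void abort_with_trace(const char * description, void * const * trace, int trace_size)
{
    std::fprintf(stderr, "Logical error: '%s'.\n", description);
    if (trace != nullptr && trace_size > 0)
        backtrace_symbols_fd(trace, trace_size, 2); // write symbolized frames to stderr
    std::abort();
}

// Convenience overload: capture the current stack, then delegate.
[[noreturn]] static void abort_with_trace(const char * description)
{
    void * frames[64];
    const int n = backtrace(frames, 64);
    abort_with_trace(description, frames, n);
}

int main()
{
    abort_with_trace("example failure");
}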
@@ -58,7 +67,7 @@ void handle_error_code(const std::string & msg, int code, bool remote, const Exc
#ifdef ABORT_ON_LOGICAL_ERROR
if (code == ErrorCodes::LOGICAL_ERROR)
{
-abortOnFailedAssertion(msg);
+abortOnFailedAssertion(msg, trace.data(), 0, trace.size());
}
#endif

@@ -25,8 +25,6 @@ namespace DB

class AtomicLogger;

-[[noreturn]] void abortOnFailedAssertion(const String & description);

/// This flag can be set for testing purposes - to check that no exceptions are thrown.
extern bool terminate_on_any_exception;

@@ -167,6 +165,8 @@ protected:
mutable std::vector<StackTrace::FramePointers> capture_thread_frame_pointers;
};

+[[noreturn]] void abortOnFailedAssertion(const String & description, void * const * trace, size_t trace_offset, size_t trace_size);
+[[noreturn]] void abortOnFailedAssertion(const String & description);

std::string getExceptionStackTraceString(const std::exception & e);
std::string getExceptionStackTraceString(std::exception_ptr e);
src/Common/FieldBinaryEncoding.cpp (new file, 389 lines)
@@ -0,0 +1,389 @@
+#include <Common/FieldBinaryEncoding.h>
+#include <IO/WriteHelpers.h>
+#include <IO/ReadHelpers.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+extern const int UNSUPPORTED_METHOD;
+extern const int INCORRECT_DATA;
+}
+
+namespace
+{
+
+enum class FieldBinaryTypeIndex: uint8_t
+{
+Null = 0x00,
+UInt64 = 0x01,
+Int64 = 0x02,
+UInt128 = 0x03,
+Int128 = 0x04,
+UInt256 = 0x05,
+Int256 = 0x06,
+Float64 = 0x07,
+Decimal32 = 0x08,
+Decimal64 = 0x09,
+Decimal128 = 0x0A,
+Decimal256 = 0x0B,
+String = 0x0C,
+Array = 0x0D,
+Tuple = 0x0E,
+Map = 0x0F,
+IPv4 = 0x10,
+IPv6 = 0x11,
+UUID = 0x12,
+Bool = 0x13,
+Object = 0x14,
+AggregateFunctionState = 0x15,
+
+NegativeInfinity = 0xFE,
+PositiveInfinity = 0xFF,
+};
+
+class FieldVisitorEncodeBinary
+{
+public:
+void operator() (const Null & x, WriteBuffer & buf) const;
+void operator() (const UInt64 & x, WriteBuffer & buf) const;
+void operator() (const UInt128 & x, WriteBuffer & buf) const;
+void operator() (const UInt256 & x, WriteBuffer & buf) const;
+void operator() (const Int64 & x, WriteBuffer & buf) const;
+void operator() (const Int128 & x, WriteBuffer & buf) const;
+void operator() (const Int256 & x, WriteBuffer & buf) const;
+void operator() (const UUID & x, WriteBuffer & buf) const;
+void operator() (const IPv4 & x, WriteBuffer & buf) const;
+void operator() (const IPv6 & x, WriteBuffer & buf) const;
+void operator() (const Float64 & x, WriteBuffer & buf) const;
+void operator() (const String & x, WriteBuffer & buf) const;
+void operator() (const Array & x, WriteBuffer & buf) const;
+void operator() (const Tuple & x, WriteBuffer & buf) const;
+void operator() (const Map & x, WriteBuffer & buf) const;
+void operator() (const Object & x, WriteBuffer & buf) const;
+void operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const;
+void operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const;
+void operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const;
+void operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const;
+void operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const;
+[[noreturn]] void operator() (const CustomType & x, WriteBuffer & buf) const;
+void operator() (const bool & x, WriteBuffer & buf) const;
+};
+
+void FieldVisitorEncodeBinary::operator() (const Null & x, WriteBuffer & buf) const
+{
+if (x.isNull())
+writeBinary(UInt8(FieldBinaryTypeIndex::Null), buf);
+else if (x.isPositiveInfinity())
+writeBinary(UInt8(FieldBinaryTypeIndex::PositiveInfinity), buf);
+else if (x.isNegativeInfinity())
+writeBinary(UInt8(FieldBinaryTypeIndex::NegativeInfinity), buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const UInt64 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::UInt64), buf);
+writeVarUInt(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Int64 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Int64), buf);
+writeVarInt(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Float64 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Float64), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const String & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::String), buf);
+writeStringBinary(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const UInt128 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::UInt128), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Int128 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Int128), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const UInt256 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::UInt256), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Int256 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Int256), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const UUID & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::UUID), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const IPv4 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::IPv4), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const IPv6 & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::IPv6), buf);
+writeBinaryLittleEndian(x, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal32> & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Decimal32), buf);
+writeVarUInt(x.getScale(), buf);
+writeBinaryLittleEndian(x.getValue(), buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal64> & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Decimal64), buf);
+writeVarUInt(x.getScale(), buf);
+writeBinaryLittleEndian(x.getValue(), buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal128> & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Decimal128), buf);
+writeVarUInt(x.getScale(), buf);
+writeBinaryLittleEndian(x.getValue(), buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const DecimalField<Decimal256> & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Decimal256), buf);
+writeVarUInt(x.getScale(), buf);
+writeBinaryLittleEndian(x.getValue(), buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const AggregateFunctionStateData & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::AggregateFunctionState), buf);
+writeStringBinary(x.name, buf);
+writeStringBinary(x.data, buf);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Array & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Array), buf);
+size_t size = x.size();
+writeVarUInt(size, buf);
+for (size_t i = 0; i < size; ++i)
+Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Tuple & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Tuple), buf);
+size_t size = x.size();
+writeVarUInt(size, buf);
+for (size_t i = 0; i < size; ++i)
+Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, x[i]);
+}
+
+void FieldVisitorEncodeBinary::operator() (const Map & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Map), buf);
+size_t size = x.size();
+writeVarUInt(size, buf);
+for (size_t i = 0; i < size; ++i)
+{
+const Tuple & key_and_value = x[i].get<Tuple>();
+Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[0]);
+Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[1]);
+}
+}
+
+void FieldVisitorEncodeBinary::operator() (const Object & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Object), buf);
+
+size_t size = x.size();
+writeVarUInt(size, buf);
+for (const auto & [key, value] : x)
+{
+writeStringBinary(key, buf);
+Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, value);
+}
+}
+
+void FieldVisitorEncodeBinary::operator()(const bool & x, WriteBuffer & buf) const
+{
+writeBinary(UInt8(FieldBinaryTypeIndex::Bool), buf);
+writeBinary(static_cast<UInt8>(x), buf);
+}
+
+[[noreturn]] void FieldVisitorEncodeBinary::operator()(const CustomType &, WriteBuffer &) const
|
||||||
|
{
|
||||||
|
/// TODO: Support binary encoding/decoding for custom types somehow.
|
||||||
|
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Binary encoding of Field with custom type is not supported");
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
Field decodeBigInteger(ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
T value;
|
||||||
|
readBinaryLittleEndian(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
DecimalField<T> decodeDecimal(ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
UInt32 scale;
|
||||||
|
readVarUInt(scale, buf);
|
||||||
|
T value;
|
||||||
|
readBinaryLittleEndian(value, buf);
|
||||||
|
return DecimalField<T>(value, scale);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
T decodeValueLittleEndian(ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
T value;
|
||||||
|
readBinaryLittleEndian(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
T decodeArrayLikeField(ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
size_t size;
|
||||||
|
readVarUInt(size, buf);
|
||||||
|
T value;
|
||||||
|
for (size_t i = 0; i != size; ++i)
|
||||||
|
value.push_back(decodeField(buf));
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
void encodeField(const Field & x, WriteBuffer & buf)
|
||||||
|
{
|
||||||
|
Field::dispatch([&buf] (const auto & val) { FieldVisitorEncodeBinary()(val, buf); }, x);
|
||||||
|
}
|
||||||
|
|
||||||
|
Field decodeField(ReadBuffer & buf)
|
||||||
|
{
|
||||||
|
UInt8 type;
|
||||||
|
readBinary(type, buf);
|
||||||
|
switch (FieldBinaryTypeIndex(type))
|
||||||
|
{
|
||||||
|
case FieldBinaryTypeIndex::Null:
|
||||||
|
return Null();
|
||||||
|
case FieldBinaryTypeIndex::PositiveInfinity:
|
||||||
|
return POSITIVE_INFINITY;
|
||||||
|
case FieldBinaryTypeIndex::NegativeInfinity:
|
||||||
|
return NEGATIVE_INFINITY;
|
||||||
|
case FieldBinaryTypeIndex::Int64:
|
||||||
|
{
|
||||||
|
Int64 value;
|
||||||
|
readVarInt(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::UInt64:
|
||||||
|
{
|
||||||
|
UInt64 value;
|
||||||
|
readVarUInt(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::Int128:
|
||||||
|
return decodeBigInteger<Int128>(buf);
|
||||||
|
case FieldBinaryTypeIndex::UInt128:
|
||||||
|
return decodeBigInteger<UInt128>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Int256:
|
||||||
|
return decodeBigInteger<Int256>(buf);
|
||||||
|
case FieldBinaryTypeIndex::UInt256:
|
||||||
|
return decodeBigInteger<UInt256>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Float64:
|
||||||
|
return decodeValueLittleEndian<Float64>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Decimal32:
|
||||||
|
return decodeDecimal<Decimal32>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Decimal64:
|
||||||
|
return decodeDecimal<Decimal64>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Decimal128:
|
||||||
|
return decodeDecimal<Decimal128>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Decimal256:
|
||||||
|
return decodeDecimal<Decimal256>(buf);
|
||||||
|
case FieldBinaryTypeIndex::String:
|
||||||
|
{
|
||||||
|
String value;
|
||||||
|
readStringBinary(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::UUID:
|
||||||
|
return decodeValueLittleEndian<UUID>(buf);
|
||||||
|
case FieldBinaryTypeIndex::IPv4:
|
||||||
|
return decodeValueLittleEndian<IPv4>(buf);
|
||||||
|
case FieldBinaryTypeIndex::IPv6:
|
||||||
|
return decodeValueLittleEndian<IPv6>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Bool:
|
||||||
|
{
|
||||||
|
bool value;
|
||||||
|
readBinary(value, buf);
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::Array:
|
||||||
|
return decodeArrayLikeField<Array>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Tuple:
|
||||||
|
return decodeArrayLikeField<Tuple>(buf);
|
||||||
|
case FieldBinaryTypeIndex::Map:
|
||||||
|
{
|
||||||
|
size_t size;
|
||||||
|
readVarUInt(size, buf);
|
||||||
|
Map map;
|
||||||
|
for (size_t i = 0; i != size; ++i)
|
||||||
|
{
|
||||||
|
Tuple key_and_value;
|
||||||
|
key_and_value.push_back(decodeField(buf));
|
||||||
|
key_and_value.push_back(decodeField(buf));
|
||||||
|
map.push_back(key_and_value);
|
||||||
|
}
|
||||||
|
return map;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::Object:
|
||||||
|
{
|
||||||
|
size_t size;
|
||||||
|
readVarUInt(size, buf);
|
||||||
|
Object value;
|
||||||
|
for (size_t i = 0; i != size; ++i)
|
||||||
|
{
|
||||||
|
String name;
|
||||||
|
readStringBinary(name, buf);
|
||||||
|
value[name] = decodeField(buf);
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
case FieldBinaryTypeIndex::AggregateFunctionState:
|
||||||
|
{
|
||||||
|
String name;
|
||||||
|
readStringBinary(name, buf);
|
||||||
|
String data;
|
||||||
|
readStringBinary(data, buf);
|
||||||
|
return AggregateFunctionStateData{.name = name, .data = data};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
throw Exception(ErrorCodes::INCORRECT_DATA, "Unknown Field type: {0:#04x}", UInt64(type));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
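For illustration only, a minimal round-trip through the two functions above might look like the sketch below; the buffer helpers and include paths are the usual ClickHouse ones and are assumed here rather than taken from this diff.

#include <Common/FieldBinaryEncoding.h>
#include <IO/WriteBufferFromString.h>
#include <IO/ReadBufferFromString.h>

/// Encode a Field into the binary format described below and decode it back.
DB::Field roundTripField(const DB::Field & field)
{
    DB::WriteBufferFromOwnString out;
    DB::encodeField(field, out);        /// writes the one-byte type tag followed by the payload

    DB::ReadBufferFromString in(out.str());
    return DB::decodeField(in);         /// reads the tag and dispatches to the matching decoder
}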
src/Common/FieldBinaryEncoding.h  Normal file (43 lines added)
@@ -0,0 +1,43 @@
#pragma once

#include <Core/Field.h>

namespace DB
{

/**
Binary encoding for Fields:
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------|
| Field type               | Binary encoding                                                                                                                 |
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------|
| `Null`                   | `0x00`                                                                                                                          |
| `UInt64`                 | `0x01<var_uint_value>`                                                                                                          |
| `Int64`                  | `0x02<var_int_value>`                                                                                                           |
| `UInt128`                | `0x03<uint128_little_endian_value>`                                                                                             |
| `Int128`                 | `0x04<int128_little_endian_value>`                                                                                              |
| `UInt256`                | `0x05<uint256_little_endian_value>`                                                                                             |
| `Int256`                 | `0x06<int256_little_endian_value>`                                                                                              |
| `Float64`                | `0x07<float64_little_endian_value>`                                                                                             |
| `Decimal32`              | `0x08<var_uint_scale><int32_little_endian_value>`                                                                               |
| `Decimal64`              | `0x09<var_uint_scale><int64_little_endian_value>`                                                                               |
| `Decimal128`             | `0x0A<var_uint_scale><int128_little_endian_value>`                                                                              |
| `Decimal256`             | `0x0B<var_uint_scale><int256_little_endian_value>`                                                                              |
| `String`                 | `0x0C<var_uint_size><data>`                                                                                                     |
| `Array`                  | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>`                                                                    |
| `Tuple`                  | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>`                                                                    |
| `Map`                    | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>`                                    |
| `IPv4`                   | `0x10<uint32_little_endian_value>`                                                                                              |
| `IPv6`                   | `0x11<uint128_little_endian_value>`                                                                                             |
| `UUID`                   | `0x12<uuid_value>`                                                                                                              |
| `Bool`                   | `0x13<bool_value>`                                                                                                              |
| `Object`                 | `0x14<var_uint_size><var_uint_key_size_1><key_data_1><value_encoding_1>...<var_uint_key_size_N><key_data_N><value_encoding_N>`  |
| `AggregateFunctionState` | `0x15<var_uint_name_size><name_data><var_uint_data_size><data>`                                                                 |
| `Negative infinity`      | `0xFE`                                                                                                                          |
| `Positive infinity`      | `0xFF`                                                                                                                          |
|--------------------------|---------------------------------------------------------------------------------------------------------------------------------|
*/

void encodeField(const Field &, WriteBuffer & buf);
Field decodeField(ReadBuffer & buf);

}
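A concrete reading of the table above (illustrative, not part of the diff): the Field `Array[UInt64(1), String("ab")]` encodes as

    0x0D                 -- Array tag
    0x02                 -- var_uint size: 2 elements
    0x01 0x01            -- UInt64 tag, var_uint value 1
    0x0C 0x02 0x61 0x62  -- String tag, var_uint size 2, bytes 'a' 'b'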
@@ -7,19 +7,20 @@ namespace DB
 /// Kind of a temporal interval.
 struct IntervalKind
 {
+    /// note: The order and numbers are important and used in binary encoding, append new interval kinds to the end of list.
     enum class Kind : uint8_t
     {
-        Nanosecond,
-        Microsecond,
-        Millisecond,
-        Second,
-        Minute,
-        Hour,
-        Day,
-        Week,
-        Month,
-        Quarter,
-        Year,
+        Nanosecond = 0x00,
+        Microsecond = 0x01,
+        Millisecond = 0x02,
+        Second = 0x03,
+        Minute = 0x04,
+        Hour = 0x05,
+        Day = 0x06,
+        Week = 0x07,
+        Month = 0x08,
+        Quarter = 0x09,
+        Year = 0x0A,
     };
     Kind kind = Kind::Second;

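Because the enumerator values are now pinned, an IntervalKind::Kind can be serialized as a single byte with no mapping table. A minimal sketch, using hypothetical helper names that are not part of this diff:

#include <Common/IntervalKind.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>

/// Hypothetical helpers: write/read the kind as its fixed underlying byte (e.g. Kind::Hour -> 0x05).
void writeIntervalKind(DB::IntervalKind::Kind kind, DB::WriteBuffer & buf)
{
    DB::writeBinary(static_cast<UInt8>(kind), buf);
}

DB::IntervalKind::Kind readIntervalKind(DB::ReadBuffer & buf)
{
    UInt8 value;
    DB::readBinary(value, buf);
    return static_cast<DB::IntervalKind::Kind>(value);
}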
@@ -235,7 +235,7 @@ bool NamedCollectionFactory::loadIfNot(std::lock_guard<std::mutex> & lock)
     loadFromConfig(context->getConfigRef(), lock);
     loadFromSQL(lock);
 
-    if (metadata_storage->supportsPeriodicUpdate())
+    if (metadata_storage->isReplicated())
     {
         update_task = context->getSchedulePool().createTask("NamedCollectionsMetadataStorage", [this]{ updateFunc(); });
         update_task->activate();
@@ -357,6 +357,13 @@ void NamedCollectionFactory::reloadFromSQL()
     add(std::move(collections), lock);
 }
 
+bool NamedCollectionFactory::usesReplicatedStorage()
+{
+    std::lock_guard lock(mutex);
+    loadIfNot(lock);
+    return metadata_storage->isReplicated();
+}
+
 void NamedCollectionFactory::updateFunc()
 {
     LOG_TRACE(log, "Named collections background updating thread started");
@@ -34,6 +34,8 @@ public:
     void updateFromSQL(const ASTAlterNamedCollectionQuery & query);
 
+    bool usesReplicatedStorage();
+
     void loadIfNot();
 
     void shutdown();
@@ -67,7 +67,7 @@ public:
     virtual bool removeIfExists(const std::string & path) = 0;
 
-    virtual bool supportsPeriodicUpdate() const = 0;
+    virtual bool isReplicated() const = 0;
 
     virtual bool waitUpdate(size_t /* timeout */) { return false; }
 };
@@ -89,7 +89,7 @@ public:
     ~LocalStorage() override = default;
 
-    bool supportsPeriodicUpdate() const override { return false; }
+    bool isReplicated() const override { return false; }
 
     std::vector<std::string> list() const override
     {
@@ -221,7 +221,7 @@ public:
     ~ZooKeeperStorage() override = default;
 
-    bool supportsPeriodicUpdate() const override { return true; }
+    bool isReplicated() const override { return true; }
 
     /// Return true if children changed.
     bool waitUpdate(size_t timeout) override
@@ -465,14 +465,14 @@ void NamedCollectionsMetadataStorage::writeCreateQuery(const ASTCreateNamedColle
     storage->write(getFileName(query.collection_name), serializeAST(*normalized_query), replace);
 }
 
-bool NamedCollectionsMetadataStorage::supportsPeriodicUpdate() const
+bool NamedCollectionsMetadataStorage::isReplicated() const
 {
-    return storage->supportsPeriodicUpdate();
+    return storage->isReplicated();
 }
 
 bool NamedCollectionsMetadataStorage::waitUpdate()
 {
-    if (!storage->supportsPeriodicUpdate())
+    if (!storage->isReplicated())
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Periodic updates are not supported");
 
     const auto & config = Context::getGlobalContextInstance()->getConfigRef();
@@ -30,7 +30,7 @@ public:
     /// Return true if update was made
     bool waitUpdate();
 
-    bool supportsPeriodicUpdate() const;
+    bool isReplicated() const;
 
 private:
     class INamedCollectionsStorage;
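A minimal sketch of how the new accessor might be consulted; the singleton accessor and include path are assumptions for illustration, not part of this diff:

#include <Common/NamedCollections/NamedCollectionsFactory.h>

/// Illustrative only: ask whether named-collection metadata lives in replicated (Keeper-backed)
/// storage, e.g. to decide whether changes need to be awaited on other replicas.
bool namedCollectionsAreReplicated()
{
    return DB::NamedCollectionFactory::instance().usesReplicatedStorage();
}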
@@ -28,7 +28,6 @@ namespace ErrorCodes
 
 namespace ProfileEvents
 {
-    extern const Event DistributedConnectionFailTry;
     extern const Event DistributedConnectionFailAtAll;
     extern const Event DistributedConnectionSkipReadOnlyReplica;
 }
@@ -285,7 +284,6 @@ PoolWithFailoverBase<TNestedPool>::getMany(
         else
         {
             LOG_WARNING(log, "Connection failed at try №{}, reason: {}", (shuffled_pool.error_count + 1), fail_message);
-            ProfileEvents::increment(ProfileEvents::DistributedConnectionFailTry);
 
             shuffled_pool.error_count = std::min(max_error_cap, shuffled_pool.error_count + 1);
 
@@ -568,6 +568,7 @@ The server successfully detected this situation and will download merged part fr
     M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \
     M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \
     M(AggregationOptimizedEqualRangesOfKeys, "For how many blocks optimization of equal ranges of keys was applied") \
+    M(HashJoinPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for hash join.") \
     \
     M(MetadataFromKeeperCacheHit, "Number of times an object storage metadata request was answered from cache without making request to Keeper") \
     M(MetadataFromKeeperCacheMiss, "Number of times an object storage metadata request had to be answered from Keeper") \
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <atomic>
|
||||||
#include <string>
|
#include <string>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
|
@@ -545,7 +545,7 @@ std::string StackTrace::toString() const
     return toStringCached(frame_pointers, offset, size);
 }
 
-std::string StackTrace::toString(void ** frame_pointers_raw, size_t offset, size_t size)
+std::string StackTrace::toString(void * const * frame_pointers_raw, size_t offset, size_t size)
 {
     __msan_unpoison(frame_pointers_raw, size * sizeof(*frame_pointers_raw));
 
@@ -59,7 +59,7 @@ public:
     const FramePointers & getFramePointers() const { return frame_pointers; }
     std::string toString() const;
 
-    static std::string toString(void ** frame_pointers, size_t offset, size_t size);
+    static std::string toString(void * const * frame_pointers, size_t offset, size_t size);
     static void dropCache();
 
     /// @param fatal - if true, will process inline frames (slower)
@@ -5,6 +5,7 @@
 #include <Common/ConcurrentBoundedQueue.h>
 
 #include <map>
+#include <variant>
 #include <unordered_map>
 #include <unordered_set>
 #include <future>
@@ -2,9 +2,11 @@
 
 #include "config.h"
 
+#include <atomic>
 #include <memory>
 #include <unordered_map>
 #include <string>
+#include <vector>
 #include <boost/noncopyable.hpp>
 
 namespace DB
@@ -6,6 +6,7 @@
 
 #include <base/hex.h>
 #include "Common/ZooKeeper/IKeeper.h"
+#include "Common/ZooKeeper/ZooKeeperCommon.h"
 #include <Common/setThreadName.h>
 #include <Common/ZooKeeper/KeeperException.h>
 #include <Common/checkStackSize.h>
@@ -320,7 +321,7 @@ void KeeperDispatcher::responseThread()
 
             try
             {
-                setResponse(response_for_session.session_id, response_for_session.response);
+                setResponse(response_for_session.session_id, response_for_session.response, response_for_session.request);
             }
             catch (...)
             {
@@ -355,7 +356,7 @@ void KeeperDispatcher::snapshotThread()
     }
 }
 
-void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
+void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)
 {
     std::lock_guard lock(session_to_response_callback_mutex);
 
@@ -369,7 +370,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
             return;
 
         auto callback = new_session_id_response_callback[session_id_resp.internal_id];
-        callback(response);
+        callback(response, request);
         new_session_id_response_callback.erase(session_id_resp.internal_id);
     }
     else /// Normal response, just write to client
@@ -380,7 +381,7 @@ void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKe
         if (session_response_callback == session_to_response_callback.end())
             return;
 
-        session_response_callback->second(response);
+        session_response_callback->second(response, request);
 
         /// Session closed, no more writes
         if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
@@ -771,21 +772,27 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms)
 
     {
         std::lock_guard lock(session_to_response_callback_mutex);
-        new_session_id_response_callback[request->internal_id] = [promise, internal_id = request->internal_id] (const Coordination::ZooKeeperResponsePtr & response)
+        new_session_id_response_callback[request->internal_id]
+            = [promise, internal_id = request->internal_id](
+                  const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr /*request*/)
         {
             if (response->getOpNum() != Coordination::OpNum::SessionID)
-                promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
-                    "Incorrect response of type {} instead of SessionID response", response->getOpNum())));
+                promise->set_exception(std::make_exception_ptr(Exception(
+                    ErrorCodes::LOGICAL_ERROR, "Incorrect response of type {} instead of SessionID response", response->getOpNum())));
 
             auto session_id_response = dynamic_cast<const Coordination::ZooKeeperSessionIDResponse &>(*response);
             if (session_id_response.internal_id != internal_id)
             {
-                promise->set_exception(std::make_exception_ptr(Exception(ErrorCodes::LOGICAL_ERROR,
-                    "Incorrect response with internal id {} instead of {}", session_id_response.internal_id, internal_id)));
+                promise->set_exception(std::make_exception_ptr(Exception(
+                    ErrorCodes::LOGICAL_ERROR,
+                    "Incorrect response with internal id {} instead of {}",
+                    session_id_response.internal_id,
+                    internal_id)));
             }
 
             if (response->error != Coordination::Error::ZOK)
-                promise->set_exception(std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
+                promise->set_exception(
+                    std::make_exception_ptr(zkutil::KeeperException::fromMessage(response->error, "SessionID request failed with error")));
 
             promise->set_value(session_id_response.session_id);
         };
@@ -20,7 +20,7 @@
 
 namespace DB
 {
-using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response)>;
+using ZooKeeperResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request)>;
 
 /// Highlevel wrapper for ClickHouse Keeper.
 /// Process user requests via consensus and return responses.
@@ -92,7 +92,7 @@ private:
     void clusterUpdateWithReconfigDisabledThread();
     void clusterUpdateThread();
 
-    void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response);
+    void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr request = nullptr);
 
     /// Add error responses for requests to responses queue.
     /// Clears requests.
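To show the widened callback shape, a minimal sketch of a handler matching the new ZooKeeperResponseCallback signature; the handler body is illustrative and not taken from this diff:

#include <Coordination/KeeperDispatcher.h>

/// Callers that do not need the originating request can simply ignore the second argument.
DB::ZooKeeperResponseCallback example_callback
    = [](const Coordination::ZooKeeperResponsePtr & response, Coordination::ZooKeeperRequestPtr /*request*/)
{
    /// e.g. inspect response->error and forward the response to the client connection.
    (void)response;
};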
@@ -407,7 +407,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
     if (!keeper_context->localLogsPreprocessed() && !preprocess(*request_for_session))
         return nullptr;
 
-    auto try_push = [&](const KeeperStorage::ResponseForSession& response)
+    auto try_push = [&](const KeeperStorage::ResponseForSession & response)
     {
         if (!responses_queue.push(response))
         {
@@ -416,17 +416,6 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
                 "Failed to push response with session id {} to the queue, probably because of shutdown",
                 response.session_id);
         }
-
-        using namespace std::chrono;
-        uint64_t elapsed = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count() - request_for_session->time;
-        if (elapsed > keeper_context->getCoordinationSettings()->log_slow_total_threshold_ms)
-        {
-            LOG_INFO(
-                log,
-                "Total time to process a request took too long ({}ms).\nRequest info: {}",
-                elapsed,
-                request_for_session->request->toString(/*short_format=*/true));
-        }
     };
 
     try
@@ -443,6 +432,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
             KeeperStorage::ResponseForSession response_for_session;
             response_for_session.session_id = -1;
             response_for_session.response = response;
+            response_for_session.request = request_for_session->request;
 
             LockGuardWithStats lock(storage_and_responses_lock);
             session_id = storage->getSessionID(session_id_request.session_timeout_ms);
@@ -462,8 +452,14 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
         LockGuardWithStats lock(storage_and_responses_lock);
         KeeperStorage::ResponsesForSessions responses_for_sessions
             = storage->processRequest(request_for_session->request, request_for_session->session_id, request_for_session->zxid);
+
         for (auto & response_for_session : responses_for_sessions)
+        {
+            if (response_for_session.response->xid != Coordination::WATCH_XID)
+                response_for_session.request = request_for_session->request;
+
             try_push(response_for_session);
+        }
 
         if (keeper_context->digestEnabled() && request_for_session->digest)
             assertDigest(*request_for_session->digest, storage->getNodesDigest(true), *request_for_session->request, request_for_session->log_idx, true);
@@ -797,9 +793,14 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
     LockGuardWithStats lock(storage_and_responses_lock);
     auto responses = storage->processRequest(
         request_for_session.request, request_for_session.session_id, std::nullopt, true /*check_acl*/, true /*is_local*/);
-    for (const auto & response : responses)
-        if (!responses_queue.push(response))
-            LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response.session_id);
+    for (auto & response_for_session : responses)
+    {
+        if (response_for_session.response->xid != Coordination::WATCH_XID)
+            response_for_session.request = request_for_session.request;
+
+        if (!responses_queue.push(response_for_session))
+            LOG_WARNING(log, "Failed to push response with session id {} to the queue, probably because of shutdown", response_for_session.session_id);
+    }
 }
 
 void KeeperStateMachine::shutdownStorage()
@@ -206,6 +206,7 @@ public:
     {
         int64_t session_id;
         Coordination::ZooKeeperResponsePtr response;
+        Coordination::ZooKeeperRequestPtr request = nullptr;
     };
 
     using ResponsesForSessions = std::vector<ResponseForSession>;
@@ -27,7 +27,8 @@ PoolWithFailover::PoolWithFailover(
     size_t pool_size,
     size_t pool_wait_timeout_,
     size_t max_tries_,
-    bool auto_close_connection_)
+    bool auto_close_connection_,
+    size_t connection_attempt_timeout_)
     : pool_wait_timeout(pool_wait_timeout_)
     , max_tries(max_tries_)
     , auto_close_connection(auto_close_connection_)
@@ -39,8 +40,13 @@ PoolWithFailover::PoolWithFailover(
 {
     for (const auto & replica_configuration : configurations)
     {
-        auto connection_info = formatConnectionString(replica_configuration.database,
-            replica_configuration.host, replica_configuration.port, replica_configuration.username, replica_configuration.password);
+        auto connection_info = formatConnectionString(
+            replica_configuration.database,
+            replica_configuration.host,
+            replica_configuration.port,
+            replica_configuration.username,
+            replica_configuration.password,
+            connection_attempt_timeout_);
         replicas_with_priority[priority].emplace_back(connection_info, pool_size);
     }
 }
@@ -51,7 +57,8 @@ PoolWithFailover::PoolWithFailover(
     size_t pool_size,
     size_t pool_wait_timeout_,
     size_t max_tries_,
-    bool auto_close_connection_)
+    bool auto_close_connection_,
+    size_t connection_attempt_timeout_)
     : pool_wait_timeout(pool_wait_timeout_)
     , max_tries(max_tries_)
     , auto_close_connection(auto_close_connection_)
@@ -63,7 +70,13 @@ PoolWithFailover::PoolWithFailover(
     for (const auto & [host, port] : configuration.addresses)
    {
         LOG_DEBUG(getLogger("PostgreSQLPoolWithFailover"), "Adding address host: {}, port: {} to connection pool", host, port);
-        auto connection_string = formatConnectionString(configuration.database, host, port, configuration.username, configuration.password);
+        auto connection_string = formatConnectionString(
+            configuration.database,
+            host,
+            port,
+            configuration.username,
+            configuration.password,
+            connection_attempt_timeout_);
         replicas_with_priority[0].emplace_back(connection_string, pool_size);
     }
 }
@@ -14,7 +14,6 @@
 
 static constexpr inline auto POSTGRESQL_POOL_DEFAULT_SIZE = 16;
 static constexpr inline auto POSTGRESQL_POOL_WAIT_TIMEOUT = 5000;
-static constexpr inline auto POSTGRESQL_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES = 2;
 
 namespace postgres
 {
@@ -30,14 +29,16 @@ public:
         size_t pool_size,
         size_t pool_wait_timeout,
         size_t max_tries_,
-        bool auto_close_connection_);
+        bool auto_close_connection_,
+        size_t connection_attempt_timeout_);
 
     explicit PoolWithFailover(
         const DB::StoragePostgreSQL::Configuration & configuration,
         size_t pool_size,
         size_t pool_wait_timeout,
         size_t max_tries_,
-        bool auto_close_connection_);
+        bool auto_close_connection_,
+        size_t connection_attempt_timeout_);
 
     PoolWithFailover(const PoolWithFailover & other) = delete;
 
@@ -8,7 +8,7 @@
 namespace postgres
 {
 
-ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password)
+ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout)
 {
     DB::WriteBufferFromOwnString out;
     out << "dbname=" << DB::quote << dbname
@@ -16,7 +16,7 @@ ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, S
         << " port=" << port
         << " user=" << DB::quote << user
         << " password=" << DB::quote << password
-        << " connect_timeout=2";
+        << " connect_timeout=" << timeout;
     return {out.str(), host + ':' + DB::toString(port)};
 }
 
@@ -18,7 +18,7 @@ namespace pqxx
 namespace postgres
 {
 
-ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password);
+ConnectionInfo formatConnectionString(String dbname, String host, UInt16 port, String user, String password, UInt64 timeout);
 
 String getConnectionForLog(const String & host, UInt16 port);
 
|
|||||||
M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||||
M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
|
||||||
M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
|
M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
|
||||||
|
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
|
||||||
M(String, merge_workload, "default", "Name of workload to be used to access resources for all merges (may be overridden by a merge tree setting)", 0) \
|
M(String, merge_workload, "default", "Name of workload to be used to access resources for all merges (may be overridden by a merge tree setting)", 0) \
|
||||||
M(String, mutation_workload, "default", "Name of workload to be used to access resources for all mutations (may be overridden by a merge tree setting)", 0) \
|
M(String, mutation_workload, "default", "Name of workload to be used to access resources for all mutations (may be overridden by a merge tree setting)", 0) \
|
||||||
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
|
M(Bool, prepare_system_log_tables_on_startup, false, "If true, ClickHouse creates all configured `system.*_log` tables before the startup. It can be helpful if some startup scripts depend on these tables.", 0) \
|
||||||
|
Some files were not shown because too many files have changed in this diff.