Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit fd26fab07f: Merge remote-tracking branch 'upstream/master' into local-send-fatal-log
@@ -164,7 +164,7 @@ if (OS_LINUX)
     #   and whatever is poisoning it by LD_PRELOAD should not link to our symbols.
     # - The clickhouse-odbc-bridge and clickhouse-library-bridge binaries
     #   should not expose their symbols to ODBC drivers and libraries.
-    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
+    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic -Wl,--gc-sections")
 endif ()

 if (OS_DARWIN)
@@ -187,9 +187,10 @@ if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
     endif ()
 endif()

-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
-    OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
-    OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
+if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE)
+    AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE"
+        OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO"
+        OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL"))
     set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT ON)
 else()
     set (OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT OFF)
@@ -273,6 +274,11 @@ option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
 if (ENABLE_BUILD_PROFILING)
     if (COMPILER_CLANG)
         set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
+
+        if (LINKER_NAME MATCHES "lld")
+            set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
+            set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
+        endif ()
     else ()
         message (${RECONFIGURE_MESSAGE_LEVEL} "Build profiling is only available with CLang")
     endif ()
@@ -286,9 +292,6 @@ set (CMAKE_C_STANDARD 11)
 set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
 set (CMAKE_C_STANDARD_REQUIRED ON)

-# Compiler-specific coverage flags e.g. -fcoverage-mapping
-option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
-
 if (COMPILER_CLANG)
     # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
     # See https://reviews.llvm.org/D112921
@@ -304,18 +307,12 @@ if (COMPILER_CLANG)
         set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
         set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
     endif()
-
-    if (WITH_COVERAGE)
-        set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
-        # If we want to disable coverage for specific translation units
-        set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
-    endif()
 endif ()

 set (COMPILER_FLAGS "${COMPILER_FLAGS}")

 # Our built-in unwinder only supports DWARF version up to 4.
-set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
+set (DEBUG_INFO_FLAGS "-g")

 # Disable omit frame pointer compiler optimization using -fno-omit-frame-pointer
 option(DISABLE_OMIT_FRAME_POINTER "Disable omit frame pointer compiler optimization" OFF)
@@ -554,10 +551,16 @@ if (ENABLE_RUST)
     endif()
 endif()

+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
+    set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
+else ()
+    set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
+endif ()
+option(CHECK_LARGE_OBJECT_SIZES "Check that there are no large object files after build." ${CHECK_LARGE_OBJECT_SIZES_DEFAULT})
+
 add_subdirectory (base)
 add_subdirectory (src)
 add_subdirectory (programs)
-add_subdirectory (tests)
 add_subdirectory (utils)

 if (FUZZER)
@@ -1,3 +1,5 @@
+add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
+
 if (USE_CLANG_TIDY)
     set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
 endif ()
@@ -1,11 +1,15 @@
 #include "coverage.h"

-#if WITH_COVERAGE
-
 #pragma GCC diagnostic ignored "-Wreserved-identifier"

-#    include <mutex>
-#    include <unistd.h>
+/// WITH_COVERAGE enables the default implementation of code coverage,
+/// that dumps a map to the filesystem.
+
+#if WITH_COVERAGE
+
+#include <mutex>
+#include <unistd.h>


 #    if defined(__clang__)
@@ -31,3 +35,131 @@ void dumpCoverageReportIfPossible()

 #endif
 }
+
+
+/// SANITIZE_COVERAGE enables code instrumentation,
+/// but leaves the callbacks implementation to us,
+/// which we use to calculate coverage on a per-test basis
+/// and to write it to system tables.
+
+#if defined(SANITIZE_COVERAGE)
+
+namespace
+{
+    bool pc_guards_initialized = false;
+    bool pc_table_initialized = false;
+
+    uint32_t * guards_start = nullptr;
+    uint32_t * guards_end = nullptr;
+
+    uintptr_t * coverage_array = nullptr;
+    size_t coverage_array_size = 0;
+
+    uintptr_t * all_addresses_array = nullptr;
+    size_t all_addresses_array_size = 0;
+}
+
+extern "C"
+{
+
+/// This is called at least once for every DSO for initialization.
+/// But we will use it only for the main DSO.
+void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
+{
+    if (pc_guards_initialized)
+        return;
+    pc_guards_initialized = true;
+
+    /// The function can be called multiple times, but we need to initialize only once.
+    if (start == stop || *start)
+        return;
+
+    guards_start = start;
+    guards_end = stop;
+    coverage_array_size = stop - start;
+
+    /// Note: we will leak this.
+    coverage_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
+
+    resetCoverage();
+}
+
+/// This is called at least once for every DSO for initialization
+/// and provides information about all instrumented addresses.
+void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs_end)
+{
+    if (pc_table_initialized)
+        return;
+    pc_table_initialized = true;
+
+    all_addresses_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
+    all_addresses_array_size = pcs_end - pcs_begin;
+
+    /// They are not a real pointers, but also contain a flag in the most significant bit,
+    /// in which we are not interested for now. Reset it.
+    for (size_t i = 0; i < all_addresses_array_size; ++i)
+        all_addresses_array[i] = pcs_begin[i] & 0x7FFFFFFFFFFFFFFFULL;
+}
+
+/// This is called at every basic block / edge, etc.
+void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
+{
+    /// Duplicate the guard check.
+    if (!*guard)
+        return;
+    *guard = 0;
+
+    /// If you set *guard to 0 this code will not be called again for this edge.
+    /// Now we can get the PC and do whatever you want:
+    ///   - store it somewhere or symbolize it and print right away.
+    /// The values of `*guard` are as you set them in
+    /// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
+    /// and use them to dereference an array or a bit vector.
+    void * pc = __builtin_return_address(0);
+
+    coverage_array[guard - guards_start] = reinterpret_cast<uintptr_t>(pc);
+}
+
+}
+
+__attribute__((no_sanitize("coverage")))
+std::span<const uintptr_t> getCoverage()
+{
+    return {coverage_array, coverage_array_size};
+}
+
+__attribute__((no_sanitize("coverage")))
+std::span<const uintptr_t> getAllInstrumentedAddresses()
+{
+    return {all_addresses_array, all_addresses_array_size};
+}
+
+__attribute__((no_sanitize("coverage")))
+void resetCoverage()
+{
+    memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array));
+
+    /// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
+    /// For example, you can unset it after first invocation to prevent excessive work.
+    /// Initially set all the guards to 1 to enable callbacks.
+    for (uint32_t * x = guards_start; x < guards_end; ++x)
+        *x = 1;
+}
+
+#else
+
+std::span<const uintptr_t> getCoverage()
+{
+    return {};
+}
+
+std::span<const uintptr_t> getAllInstrumentedAddresses()
+{
+    return {};
+}
+
+void resetCoverage()
+{
+}
+
+#endif
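For context, the callbacks added above follow Clang's SanitizerCoverage trace-pc-guard interface. The following is a minimal, self-contained sketch of that mechanism, not ClickHouse code; it assumes a single file compiled with something like clang++ -fsanitize-coverage=trace-pc-guard cov_demo.cpp, and everything beyond the two compiler-defined callback names is illustrative:

    // cov_demo.cpp - hypothetical standalone demo of trace-pc-guard callbacks.
    #include <cstdint>
    #include <cstdio>

    /// Called once per DSO with the [start, stop) range of edge guards; give every edge a non-zero id.
    extern "C" __attribute__((no_sanitize("coverage")))
    void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
    {
        static uint32_t counter = 0;
        if (start == stop || *start)
            return; /// this module was already numbered
        for (uint32_t * guard = start; guard < stop; ++guard)
            *guard = ++counter;
    }

    /// Called on every instrumented edge whose guard is still non-zero.
    extern "C" __attribute__((no_sanitize("coverage")))
    void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
    {
        if (!*guard)
            return;
        void * pc = __builtin_return_address(0);
        std::printf("edge %u hit, return address %p\n", *guard, pc);
        *guard = 0; /// report each edge only once, as the callback above does
    }

    int main()
    {
        std::puts("hello"); /// edges of main() are reported through the callback
        return 0;
    }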
@@ -1,5 +1,8 @@
 #pragma once

+#include <span>
+#include <cstdint>
+
 /// Flush coverage report to file, depending on coverage system
 /// proposed by compiler (llvm for clang and gcov for gcc).
 ///
@@ -7,3 +10,16 @@
 /// Thread safe (use exclusive lock).
 /// Idempotent, may be called multiple times.
 void dumpCoverageReportIfPossible();
+
+/// This is effective if SANITIZE_COVERAGE is enabled at build time.
+/// Get accumulated unique program addresses of the instrumented parts of the code,
+/// seen so far after program startup or after previous reset.
+/// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away.
+std::span<const uintptr_t> getCoverage();
+
+/// Get all instrumented addresses that could be in the coverage.
+std::span<const uintptr_t> getAllInstrumentedAddresses();
+
+/// Reset the accumulated coverage.
+/// This is useful to compare coverage of different tests, including differential coverage.
+void resetCoverage();
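For context, a hedged sketch of how a test harness might consume the declarations added above on a SANITIZE_COVERAGE build; only the three coverage functions come from the header, run_one_test() and report_address() are made-up placeholders:

    #include <cstdint>
    #include <span>

    /// Declarations from the coverage header shown above.
    std::span<const uintptr_t> getCoverage();
    std::span<const uintptr_t> getAllInstrumentedAddresses();
    void resetCoverage();

    void run_one_test();                    /// placeholder: executes a single test case
    void report_address(uintptr_t address); /// placeholder: e.g. write a row to a system table

    void run_test_with_coverage()
    {
        resetCoverage();                    /// start from a clean slate so the data is per-test
        run_one_test();

        /// The span is sparse: most slots are zero and must be filtered away.
        for (uintptr_t address : getCoverage())
            if (address != 0)
                report_address(address);

        /// The full set of instrumented addresses gives the denominator for a coverage ratio.
        const size_t total_instrumented = getAllInstrumentedAddresses().size();
        (void)total_instrumented;
    }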
@@ -65,7 +65,7 @@ class IsTupleLike
     static void check(...);

 public:
-    static constexpr const bool value = !std::is_void<decltype(check<T>(nullptr))>::value;
+    static constexpr const bool value = !std::is_void_v<decltype(check<T>(nullptr))>;
 };

 }
@@ -79,7 +79,7 @@ class numeric_limits<wide::integer<Bits, Signed>>
 {
 public:
     static constexpr bool is_specialized = true;
-    static constexpr bool is_signed = is_same<Signed, signed>::value;
+    static constexpr bool is_signed = is_same_v<Signed, signed>;
     static constexpr bool is_integer = true;
     static constexpr bool is_exact = true;
     static constexpr bool has_infinity = false;
@@ -91,7 +91,7 @@ public:
     static constexpr bool is_iec559 = false;
     static constexpr bool is_bounded = true;
     static constexpr bool is_modulo = true;
-    static constexpr int digits = Bits - (is_same<Signed, signed>::value ? 1 : 0);
+    static constexpr int digits = Bits - (is_same_v<Signed, signed> ? 1 : 0);
     static constexpr int digits10 = digits * 0.30103 /*std::log10(2)*/;
     static constexpr int max_digits10 = 0;
     static constexpr int radix = 2;
@@ -104,7 +104,7 @@ public:

     static constexpr wide::integer<Bits, Signed> min() noexcept
     {
-        if (is_same<Signed, signed>::value)
+        if constexpr (is_same_v<Signed, signed>)
         {
             using T = wide::integer<Bits, signed>;
             T res{};
@@ -118,7 +118,7 @@ public:
     {
         using T = wide::integer<Bits, Signed>;
         T res{};
-        res.items[T::_impl::big(0)] = is_same<Signed, signed>::value
+        res.items[T::_impl::big(0)] = is_same_v<Signed, signed>
             ? std::numeric_limits<typename wide::integer<Bits, Signed>::signed_base_type>::max()
             : std::numeric_limits<typename wide::integer<Bits, Signed>::base_type>::max();
         for (unsigned i = 1; i < wide::integer<Bits, Signed>::_impl::item_count; ++i)
@@ -5,9 +5,6 @@ if (GLIBC_COMPATIBILITY)
     endif()

     enable_language(ASM)
-    include(CheckIncludeFile)
-
-    check_include_file("sys/random.h" HAVE_SYS_RANDOM_H)

     add_headers_and_sources(glibc_compatibility .)
     add_headers_and_sources(glibc_compatibility musl)
@@ -21,11 +18,6 @@ if (GLIBC_COMPATIBILITY)
         message (FATAL_ERROR "glibc_compatibility can only be used on x86_64 or aarch64.")
     endif ()

-    list(REMOVE_ITEM glibc_compatibility_sources musl/getentropy.c)
-    if(HAVE_SYS_RANDOM_H)
-        list(APPEND glibc_compatibility_sources musl/getentropy.c)
-    endif()
-
     # Need to omit frame pointers to match the performance of glibc
     set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")

@@ -1,5 +1,6 @@
 #include "memcpy.h"

+__attribute__((no_sanitize("coverage")))
 extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
 {
     return inline_memcpy(dst, src, size);
@@ -93,7 +93,7 @@
  * See https://habr.com/en/company/yandex/blog/457612/
  */

+__attribute__((no_sanitize("coverage")))
 static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
 {
     /// We will use pointer arithmetic, so char pointer will be used.
@@ -26,7 +26,6 @@ HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams):
     _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests())
 {
     setTimeout(pParams->getTimeout());
-    this->socket().setReceiveTimeout(pParams->getTimeout());
 }


@@ -93,9 +93,34 @@ void HTTPSession::setTimeout(const Poco::Timespan& timeout)

 void HTTPSession::setTimeout(const Poco::Timespan& connectionTimeout, const Poco::Timespan& sendTimeout, const Poco::Timespan& receiveTimeout)
 {
-    _connectionTimeout = connectionTimeout;
-    _sendTimeout = sendTimeout;
-    _receiveTimeout = receiveTimeout;
+    try
+    {
+        _connectionTimeout = connectionTimeout;
+
+        if (_sendTimeout.totalMicroseconds() != sendTimeout.totalMicroseconds()) {
+            _sendTimeout = sendTimeout;
+
+            if (connected())
+                _socket.setSendTimeout(_sendTimeout);
+        }
+
+        if (_receiveTimeout.totalMicroseconds() != receiveTimeout.totalMicroseconds()) {
+            _receiveTimeout = receiveTimeout;
+
+            if (connected())
+                _socket.setReceiveTimeout(_receiveTimeout);
+        }
+    }
+    catch (NetException &)
+    {
+#ifndef NDEBUG
+        throw;
+#else
+        // mute exceptions in release
+        // just in case when changing settings on socket is not allowed
+        // however it should be OK for timeouts
+#endif
+    }
 }


@@ -1,19 +0,0 @@
-# Adding test output on failure
-enable_testing ()
-
-if (NOT TARGET check)
-    if (CMAKE_CONFIGURATION_TYPES)
-        add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
-            --force-new-ctest-process --output-on-failure --build-config "$<CONFIGURATION>"
-            WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
-    else ()
-        add_custom_target (check COMMAND ${CMAKE_CTEST_COMMAND}
-            --force-new-ctest-process --output-on-failure
-            WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
-    endif ()
-endif ()
-
-macro (add_check target)
-    add_test (NAME test_${target} COMMAND ${target} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
-    add_dependencies (check ${target})
-endmacro (add_check)
@@ -1,10 +1,5 @@
 # https://software.intel.com/sites/landingpage/IntrinsicsGuide/

-include (CheckCXXSourceCompiles)
-include (CMakePushCheckState)
-
-cmake_push_check_state ()
-
 # The variables HAVE_* determine if compiler has support for the flag to use the corresponding instruction set.
 # The options ENABLE_* determine if we will tell compiler to actually use the corresponding instruction set if compiler can do it.

@@ -137,189 +132,54 @@ elseif (ARCH_AMD64)
     endif()

     # ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
-    # AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
-    # Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
-    # compile+link+run).
+    # AVX. We only assume that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.

-    set (TEST_FLAG "-mssse3")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <tmmintrin.h>
-        int main() {
-            __m64 a = _mm_abs_pi8(__m64());
-            (void)a;
-            return 0;
-        }
-    " HAVE_SSSE3)
-    if (HAVE_SSSE3 AND ENABLE_SSSE3)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_SSSE3)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mssse3")
     endif ()

-    set (TEST_FLAG "-msse4.1")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <smmintrin.h>
-        int main() {
-            auto a = _mm_insert_epi8(__m128i(), 0, 0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_SSE41)
-    if (HAVE_SSE41 AND ENABLE_SSE41)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_SSE41)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.1")
     endif ()

-    set (TEST_FLAG "-msse4.2")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <nmmintrin.h>
-        int main() {
-            auto a = _mm_crc32_u64(0, 0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_SSE42)
-    if (HAVE_SSE42 AND ENABLE_SSE42)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_SSE42)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -msse4.2")
     endif ()

-    set (TEST_FLAG "-mpclmul")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <wmmintrin.h>
-        int main() {
-            auto a = _mm_clmulepi64_si128(__m128i(), __m128i(), 0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_PCLMULQDQ)
-    if (HAVE_PCLMULQDQ AND ENABLE_PCLMULQDQ)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_PCLMULQDQ)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpclmul")
     endif ()

-    set (TEST_FLAG "-mpopcnt")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        int main() {
-            auto a = __builtin_popcountll(0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_POPCNT)
-    if (HAVE_POPCNT AND ENABLE_POPCNT)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_BMI)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi")
     endif ()

-    set (TEST_FLAG "-mavx")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _mm256_insert_epi8(__m256i(), 0, 0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_AVX)
-    if (HAVE_AVX AND ENABLE_AVX)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_POPCNT)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mpopcnt")
     endif ()

-    set (TEST_FLAG "-mavx2")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _mm256_add_epi16(__m256i(), __m256i());
-            (void)a;
-            return 0;
-        }
-    " HAVE_AVX2)
-    if (HAVE_AVX2 AND ENABLE_AVX2)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
+    if (ENABLE_AVX)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx")
     endif ()

-    set (TEST_FLAG "-mavx512f -mavx512bw -mavx512vl")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _mm512_setzero_epi32();
-            (void)a;
-            auto b = _mm512_add_epi16(__m512i(), __m512i());
-            (void)b;
-            auto c = _mm_cmp_epi8_mask(__m128i(), __m128i(), 0);
-            (void)c;
-            return 0;
-        }
-    " HAVE_AVX512)
-    if (HAVE_AVX512 AND ENABLE_AVX512)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
-    endif ()
-
-    set (TEST_FLAG "-mavx512vbmi")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _mm512_permutexvar_epi8(__m512i(), __m512i());
-            (void)a;
-            return 0;
-        }
-    " HAVE_AVX512_VBMI)
-    if (HAVE_AVX512 AND ENABLE_AVX512 AND HAVE_AVX512_VBMI AND ENABLE_AVX512_VBMI)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
-    endif ()
-
-    set (TEST_FLAG "-mbmi")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _blsr_u32(0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_BMI)
-    if (HAVE_BMI AND ENABLE_BMI)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
-    endif ()
-
-    set (TEST_FLAG "-mbmi2")
-    set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
-    check_cxx_source_compiles("
-        #include <immintrin.h>
-        int main() {
-            auto a = _pdep_u64(0, 0);
-            (void)a;
-            return 0;
-        }
-    " HAVE_BMI2)
-    if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2)
-        set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
-    endif ()
-
-    # Limit avx2/avx512 flag for specific source build
-    set (X86_INTRINSICS_FLAGS "")
-    if (ENABLE_AVX2_FOR_SPEC_OP)
-        if (HAVE_BMI)
-            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
-        endif ()
-        if (HAVE_AVX AND HAVE_AVX2)
-            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx -mavx2")
+    if (ENABLE_AVX2)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx2")
+        if (ENABLE_BMI2)
+            set (COMPILER_FLAGS "${COMPILER_FLAGS} -mbmi2")
+        endif ()
+    endif ()
+
+    if (ENABLE_AVX512)
+        set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512f -mavx512bw -mavx512vl")
+        if (ENABLE_AVX512_VBMI)
+            set (COMPILER_FLAGS "${COMPILER_FLAGS} -mavx512vbmi")
         endif ()
     endif ()

     if (ENABLE_AVX512_FOR_SPEC_OP)
-        set (X86_INTRINSICS_FLAGS "")
-        if (HAVE_BMI)
-            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mbmi")
-        endif ()
-        if (HAVE_AVX512)
-            set (X86_INTRINSICS_FLAGS "${X86_INTRINSICS_FLAGS} -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256")
-        endif ()
+        set (X86_INTRINSICS_FLAGS "-mbmi -mavx512f -mavx512bw -mavx512vl -mprefer-vector-width=256")
     endif ()

 else ()
     # RISC-V + exotic platforms
 endif ()
-
-cmake_pop_check_state ()
@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin")
|
|||||||
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
|
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
|
||||||
|
|
||||||
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,9 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-apple-darwin")
|
|||||||
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-x86_64")
|
set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-x86_64")
|
||||||
|
|
||||||
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "aarch64-unknown-freebsd12")
|
|||||||
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-aarch64")
|
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-aarch64")
|
||||||
|
|
||||||
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-unknown-freebsd13")
|
|||||||
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le")
|
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-ppc64le")
|
||||||
|
|
||||||
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,13 +9,3 @@ set (CMAKE_ASM_COMPILER_TARGET "x86_64-pc-freebsd11")
|
|||||||
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")
|
set (CMAKE_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/freebsd-x86_64")
|
||||||
|
|
||||||
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "aarch64-linux-gnu")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
|
set (CMAKE_CXX_COMPILER_TARGET "aarch64-linux-gnu")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
|
set (CMAKE_ASM_COMPILER_TARGET "aarch64-linux-gnu")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-aarch64")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
|
||||||
@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/aarch64-linux-gnu/libc")
|
|||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "powerpc64le-linux-gnu")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-linux-gnu")
|
set (CMAKE_CXX_COMPILER_TARGET "powerpc64le-linux-gnu")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-linux-gnu")
|
set (CMAKE_ASM_COMPILER_TARGET "powerpc64le-linux-gnu")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-powerpc64le")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
|
||||||
@ -20,9 +16,3 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/powerpc64le-linux-gnu/libc")
|
|||||||
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "riscv64-linux-gnu")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu")
|
set (CMAKE_CXX_COMPILER_TARGET "riscv64-linux-gnu")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu")
|
set (CMAKE_ASM_COMPILER_TARGET "riscv64-linux-gnu")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-riscv64")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
|
||||||
@ -27,9 +23,3 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=bfd")
|
|||||||
# ld.lld: error: section size decrease is too large
|
# ld.lld: error: section size decrease is too large
|
||||||
# But GNU BinUtils work.
|
# But GNU BinUtils work.
|
||||||
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
|
set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "s390x-linux-gnu")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "s390x-linux-gnu")
|
set (CMAKE_CXX_COMPILER_TARGET "s390x-linux-gnu")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "s390x-linux-gnu")
|
set (CMAKE_ASM_COMPILER_TARGET "s390x-linux-gnu")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-s390x")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-s390x")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
|
||||||
@ -23,9 +19,3 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
|||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
||||||
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
||||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@ -9,10 +9,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-musl")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl")
|
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-musl")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl")
|
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-musl")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64-musl")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}")
|
||||||
@ -21,11 +17,5 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
|||||||
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (USE_MUSL 1)
|
set (USE_MUSL 1)
|
||||||
add_definitions(-DUSE_MUSL=1)
|
add_definitions(-DUSE_MUSL=1)
|
||||||
|
@ -19,10 +19,6 @@ set (CMAKE_C_COMPILER_TARGET "x86_64-linux-gnu")
|
|||||||
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu")
|
set (CMAKE_CXX_COMPILER_TARGET "x86_64-linux-gnu")
|
||||||
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu")
|
set (CMAKE_ASM_COMPILER_TARGET "x86_64-linux-gnu")
|
||||||
|
|
||||||
# Will be changed later, but somehow needed to be set here.
|
|
||||||
set (CMAKE_AR "ar")
|
|
||||||
set (CMAKE_RANLIB "ranlib")
|
|
||||||
|
|
||||||
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64")
|
set (TOOLCHAIN_PATH "${CMAKE_CURRENT_LIST_DIR}/../../contrib/sysroot/linux-x86_64")
|
||||||
|
|
||||||
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")
|
set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/x86_64-linux-gnu/libc")
|
||||||
@ -32,9 +28,3 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
|||||||
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
|
||||||
|
|
||||||
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
|
||||||
set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
|
|
||||||
set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
|
|
||||||
|
@@ -58,3 +58,27 @@ if (SANITIZE)
         message (FATAL_ERROR "Unknown sanitizer type: ${SANITIZE}")
     endif ()
 endif()
+
+# Default coverage instrumentation (dumping the coverage map on exit)
+option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF)
+
+if (WITH_COVERAGE)
+    message (INFORMATION "Enabled instrumentation for code coverage")
+    set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
+endif()
+
+option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
+
+if (SANITIZE_COVERAGE)
+    message (INFORMATION "Enabled instrumentation for code coverage")
+
+    # We set this define for whole build to indicate that at least some parts are compiled with coverage.
+    # And to expose it in system.build_options.
+    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DSANITIZE_COVERAGE=1")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DSANITIZE_COVERAGE=1")
+
+    # But the actual coverage will be enabled on per-library basis: for ClickHouse code, but not for 3rd-party.
+    set (COVERAGE_FLAGS "-fsanitize-coverage=trace-pc-guard,pc-table")
+endif()
+
+set (WITHOUT_COVERAGE_FLAGS "-fno-profile-instr-generate -fno-coverage-mapping -fno-sanitize-coverage=trace-pc-guard,pc-table")
contrib/AMQP-CPP (vendored submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit 818c2d8ad96a08a5d20fece7d1e1e8855a2b0860
+Subproject commit 00f09897ce020a84e38f87dc416af4a19c5da9ae
contrib/CMakeLists.txt (vendored, 13 lines changed)
@@ -1,16 +1,7 @@
 #"${folder}/CMakeLists.txt" Third-party libraries may have substandard code.

-set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
-
-if (WITH_COVERAGE)
-    set (WITHOUT_COVERAGE_LIST ${WITHOUT_COVERAGE})
-    separate_arguments(WITHOUT_COVERAGE_LIST)
-    # disable coverage for contib files and build with optimisations
-    if (COMPILER_CLANG)
-        add_compile_options(-O3 -DNDEBUG -finline-functions -finline-hint-functions ${WITHOUT_COVERAGE_LIST})
-    endif()
-endif()
+set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -ffunction-sections -fdata-sections")
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -ffunction-sections -fdata-sections")

 if (SANITIZE STREQUAL "undefined")
     # 3rd-party libraries usually not intended to work with UBSan.
contrib/abseil-cpp (vendored submodule, 2 lines changed)
@@ -1 +1 @@
-Subproject commit 5655528c41830f733160de4fb0b99073841bae9e
+Subproject commit 3bd86026c93da5a40006fd53403dff9d5f5e30e3
(One file diff suppressed because it is too large.)
@@ -77,16 +77,16 @@ set(FLATBUFFERS_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/flatbuffers")
 set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
 set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")

-# set flatbuffers CMake options
-set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
-set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
-set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
-
-add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
-
-add_library(_flatbuffers INTERFACE)
-target_link_libraries(_flatbuffers INTERFACE flatbuffers)
-target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
+set(FLATBUFFERS_SRCS
+  ${FLATBUFFERS_SRC_DIR}/src/idl_parser.cpp
+  ${FLATBUFFERS_SRC_DIR}/src/idl_gen_text.cpp
+  ${FLATBUFFERS_SRC_DIR}/src/reflection.cpp
+  ${FLATBUFFERS_SRC_DIR}/src/util.cpp)
+
+add_library(_flatbuffers STATIC ${FLATBUFFERS_SRCS})
+target_include_directories(_flatbuffers PUBLIC ${FLATBUFFERS_INCLUDE_DIR})
+target_compile_definitions(_flatbuffers PRIVATE -DFLATBUFFERS_LOCALE_INDEPENDENT=0)

 # === hdfs
 # NOTE: cannot use ch_contrib::hdfs since it's INCLUDE_DIRECTORIES does not includes trailing "hdfs/"
@@ -127,7 +127,6 @@ set(ORC_SRCS
     "${ORC_SOURCE_SRC_DIR}/BpackingDefault.hh"
     "${ORC_SOURCE_SRC_DIR}/ByteRLE.cc"
     "${ORC_SOURCE_SRC_DIR}/ByteRLE.hh"
-    "${ORC_SOURCE_SRC_DIR}/CMakeLists.txt"
     "${ORC_SOURCE_SRC_DIR}/ColumnPrinter.cc"
     "${ORC_SOURCE_SRC_DIR}/ColumnReader.cc"
     "${ORC_SOURCE_SRC_DIR}/ColumnReader.hh"
@@ -1,114 +1,13 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0.

-include(CheckCSourceRuns)
-
 option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)

-# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
-# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
-# work-around, disable avx2 (and all other extensions) in mingw builds.
-#
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
-#
-if (MINGW)
-    message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
-    set(USE_CPU_EXTENSIONS OFF)
-endif()
-
-if(NOT CMAKE_CROSSCOMPILING)
-    check_c_source_runs("
-    #include <stdbool.h>
-    bool foo(int a, int b, int *c) {
-        return __builtin_mul_overflow(a, b, c);
-    }
-
-    int main() {
-        int out;
-        if (foo(1, 2, &out)) {
-            return 0;
-        }
-
-        return 0;
-    }" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
-
-    if (USE_CPU_EXTENSIONS)
-        check_c_source_runs("
-        int main() {
-            int foo = 42;
-            _mulx_u32(1, 2, &foo);
-            return foo != 2;
-        }" AWS_HAVE_MSVC_MULX)
-    endif()
-
-endif()
-
-check_c_source_compiles("
-    #include <Windows.h>
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
-    int main() {
-        return 0;
-    }
-#else
-    it's not windows desktop
-#endif
-" AWS_HAVE_WINAPI_DESKTOP)
-
-check_c_source_compiles("
-    int main() {
-#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
-#    error \"not intel\"
-#endif
-        return 0;
-    }
-" AWS_ARCH_INTEL)
-
-check_c_source_compiles("
-    int main() {
-#if !(defined(__aarch64__) || defined(_M_ARM64))
-#    error \"not arm64\"
-#endif
-        return 0;
-    }
-" AWS_ARCH_ARM64)
-
-check_c_source_compiles("
-    int main() {
-#if !(defined(__arm__) || defined(_M_ARM))
-#    error \"not arm\"
-#endif
-        return 0;
-    }
-" AWS_ARCH_ARM32)
-
-check_c_source_compiles("
-int main() {
-    int foo = 42, bar = 24;
-    __asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
-}" AWS_HAVE_GCC_INLINE_ASM)
-
-check_c_source_compiles("
-#include <sys/auxv.h>
-int main() {
-#ifdef __linux__
-    getauxval(AT_HWCAP);
-    getauxval(AT_HWCAP2);
-#endif
-    return 0;
-}" AWS_HAVE_AUXV)
-
-string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
-if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
-    check_c_source_compiles("
-    #include <execinfo.h>
-    int main() {
-        backtrace(NULL, 0);
-        return 0;
-    }" AWS_HAVE_EXECINFO)
-endif()
-
-check_c_source_compiles("
-#include <linux/if_link.h>
-int main() {
-    return 1;
-}" AWS_HAVE_LINUX_IF_LINK_H)
+if (ARCH_AMD64)
+    set (AWS_ARCH_INTEL 1)
+elseif (ARCH_AARCH64)
+    set (AWS_ARCH_ARM64 1)
+endif ()
+
+set (AWS_HAVE_GCC_INLINE_ASM 1)
+set (AWS_HAVE_AUXV 1)
@ -1,54 +1,13 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0.

-include(CheckCCompilerFlag)
-include(CheckIncludeFile)
-
 if (USE_CPU_EXTENSIONS)
-    if (MSVC)
-        check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
-        if (HAVE_M_AVX2_FLAG)
-            set(AVX2_CFLAGS "/arch:AVX2")
-        endif()
-    else()
-        check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
-        if (HAVE_M_AVX2_FLAG)
-            set(AVX2_CFLAGS "-mavx -mavx2")
-        endif()
-    endif()
-
-    cmake_push_check_state()
-    set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")
-
-    check_c_source_compiles("
-        #include <immintrin.h>
-        #include <emmintrin.h>
-        #include <string.h>
-
-        int main() {
-            __m256i vec;
-            memset(&vec, 0, sizeof(vec));
-
-            _mm256_shuffle_epi8(vec, vec);
-            _mm256_set_epi32(1,2,3,4,5,6,7,8);
-            _mm256_permutevar8x32_epi32(vec, vec);
-
-            return 0;
-        }" HAVE_AVX2_INTRINSICS)
-
-    check_c_source_compiles("
-        #include <immintrin.h>
-        #include <string.h>
-
-        int main() {
-            __m256i vec;
-            memset(&vec, 0, sizeof(vec));
-            return (int)_mm256_extract_epi64(vec, 2);
-        }" HAVE_MM256_EXTRACT_EPI64)
-
-    cmake_pop_check_state()
-endif() # USE_CPU_EXTENSIONS
+    if (ENABLE_AVX2)
+        set (AVX2_CFLAGS "-mavx -mavx2")
+        set (HAVE_AVX2_INTRINSICS 1)
+        set (HAVE_MM256_EXTRACT_EPI64 1)
+    endif()
+endif()

 macro(simd_add_definition_if target definition)
     if(${definition})
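A minimal usage sketch for the simplified detection above (not from the diff; the target and source file names are hypothetical): the hard-coded results gate both the definition added via simd_add_definition_if and the per-file flags.

simd_add_definition_if(aws_checksums_example HAVE_AVX2_INTRINSICS)
if (HAVE_AVX2_INTRINSICS)
    # COMPILE_FLAGS accepts the space-separated string stored in AVX2_CFLAGS.
    set_source_files_properties(crc32c_avx2.c PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif ()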
@ -1,50 +1,9 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0.

-include(CheckSymbolExists)
-
 # Check if the platform supports setting thread affinity
 # (important for hitting full NIC entitlement on NUMA architectures)
 function(aws_set_thread_affinity_method target)
+    # This code has been cut, because I don't care about it.
+    target_compile_definitions(${target} PRIVATE -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
-    # Non-POSIX, Android, and Apple platforms do not support thread affinity.
-    if (NOT UNIX OR ANDROID OR APPLE)
-        target_compile_definitions(${target} PRIVATE
-            -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
-        return()
-    endif()
-
-    cmake_push_check_state()
-    list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
-    list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
-
-    set(headers "pthread.h")
-    # BSDs put nonportable pthread declarations in a separate header.
-    if(CMAKE_SYSTEM_NAME MATCHES BSD)
-        set(headers "${headers};pthread_np.h")
-    endif()
-
-    # Using pthread attrs is the preferred method, but is glibc-specific.
-    check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
-    if (USE_PTHREAD_ATTR_SETAFFINITY)
-        target_compile_definitions(${target} PRIVATE
-            -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
-        return()
-    endif()
-
-    # This method is still nonportable, but is supported by musl and BSDs.
-    check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
-    if (USE_PTHREAD_SETAFFINITY)
-        target_compile_definitions(${target} PRIVATE
-            -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
-        return()
-    endif()
-
-    # If we got here, we expected thread affinity support but didn't find it.
-    # We still build with degraded NUMA performance, but show a warning.
-    message(WARNING "No supported method for setting thread affinity")
-    target_compile_definitions(${target} PRIVATE
-        -DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
-
-    cmake_pop_check_state()
 endfunction()
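Not part of the diff: with the function reduced to a stub, callers stay unchanged; a hypothetical consumer still just invokes it on its target and compiles against the resulting AWS_AFFINITY_METHOD definition.

aws_set_thread_affinity_method(aws_common_example)   # hypothetical target name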
@ -1,61 +1,13 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0.

-include(CheckSymbolExists)
-
 # Check how the platform supports setting thread name
 function(aws_set_thread_name_method target)
-    if (WINDOWS)
-        # On Windows we do a runtime check, instead of compile-time check
-        return()
-    elseif (APPLE)
+    if (APPLE)
         # All Apple platforms we support have the same function, so no need for compile-time check.
         return()
     endif()

-    cmake_push_check_state()
-    list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
-    list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
-
-    # The start of the test program
-    set(c_source_start "
-        #define _GNU_SOURCE
-        #include <pthread.h>
-
-        #if defined(__FreeBSD__) || defined(__NETBSD__)
-        #include <pthread_np.h>
-        #endif
-
-        int main() {
-            pthread_t thread_id;
-        ")
-
-    # The end of the test program
-    set(c_source_end "}")
-
     # pthread_setname_np() usually takes 2 args
-    check_c_source_compiles("
-        ${c_source_start}
-        pthread_setname_np(thread_id, \"asdf\");
-        ${c_source_end}"
-        PTHREAD_SETNAME_TAKES_2ARGS)
-    if (PTHREAD_SETNAME_TAKES_2ARGS)
-        target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
-        return()
-    endif()
-
-    # But on NetBSD it takes 3!
-    check_c_source_compiles("
-        ${c_source_start}
-        pthread_setname_np(thread_id, \"asdf\", NULL);
-        ${c_source_end}
-    " PTHREAD_SETNAME_TAKES_3ARGS)
-    if (PTHREAD_SETNAME_TAKES_3ARGS)
-        target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
-        return()
-    endif()
-
-    # And on many older/weirder platforms it's just not supported
-    cmake_pop_check_state()
+    target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
 endfunction()
@ -48,9 +48,8 @@ set(AZURE_SDK_INCLUDES
     "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/inc/"
 )

-include("${AZURE_DIR}/cmake-modules/AzureTransportAdapters.cmake")
-
 add_library(_azure_sdk ${AZURE_SDK_UNIFIED_SRC})
+target_compile_definitions(_azure_sdk PRIVATE BUILD_CURL_HTTP_TRANSPORT_ADAPTER)

 # Originally, on Windows azure-core is built with bcrypt and crypt32 by default
 if (TARGET OpenSSL::SSL)
@ -68,8 +68,7 @@ list(APPEND INCLUDE_DIRS
   ${CASS_SRC_DIR}/third_party/hdr_histogram
   ${CASS_SRC_DIR}/third_party/http-parser
   ${CASS_SRC_DIR}/third_party/mt19937_64
-  ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson
-  ${CASS_SRC_DIR}/third_party/sparsehash/src)
+  ${CASS_SRC_DIR}/third_party/rapidjson/rapidjson)

 list(APPEND INCLUDE_DIRS ${CASS_INCLUDE_DIR} ${CASS_SRC_DIR})

@ -83,10 +82,6 @@ set(HAVE_MEMCPY 1)
 set(HAVE_LONG_LONG 1)
 set(HAVE_UINT16_T 1)

-configure_file("${CASS_SRC_DIR}/third_party/sparsehash/config.h.cmake" "${CMAKE_CURRENT_BINARY_DIR}/sparsehash/internal/sparseconfig.h")
-
-
 # Determine random availability
 if (OS_LINUX)
     #set (HAVE_GETRANDOM 1) - not on every Linux kernel

@ -116,17 +111,17 @@ configure_file(
     ${CASS_ROOT_DIR}/driver_config.hpp.in
     ${CMAKE_CURRENT_BINARY_DIR}/driver_config.hpp)


 add_library(_cassandra
     ${SOURCES}
     $<TARGET_OBJECTS:_curl_hostcheck>
     $<TARGET_OBJECTS:_hdr_histogram>
     $<TARGET_OBJECTS:_http-parser>)

-target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip)
+target_link_libraries(_cassandra ch_contrib::zlib ch_contrib::minizip ch_contrib::sparsehash)
 target_include_directories(_cassandra PRIVATE ${CMAKE_CURRENT_BINARY_DIR} ${INCLUDE_DIRS})
 target_include_directories(_cassandra SYSTEM BEFORE PUBLIC ${CASS_INCLUDE_DIR})
 target_compile_definitions(_cassandra PRIVATE CASS_BUILDING)
+target_compile_definitions(_cassandra PRIVATE -DSPARSEHASH_HASH=std::hash -Dsparsehash=google)

 target_link_libraries(_cassandra ch_contrib::uv)
@ -13,12 +13,10 @@ set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/fastops")

 set(SRCS "")

-if(HAVE_AVX)
+if(ARCH_AMD64)
     set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx/ops_avx.cpp")
     set_source_files_properties("${LIBRARY_DIR}/fastops/avx/ops_avx.cpp" PROPERTIES COMPILE_FLAGS "-mavx -DNO_AVX2")
-endif()
-
-if(HAVE_AVX2)
     set (SRCS ${SRCS} "${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp")
     set_source_files_properties("${LIBRARY_DIR}/fastops/avx2/ops_avx2.cpp" PROPERTIES COMPILE_FLAGS "-mavx2 -mfma")
 endif()
contrib/google-protobuf (vendored submodule)
@ -1 +1 @@
-Subproject commit 2a4fa1a4e95012d754ac55d43c8bc462dd1c78a8
+Subproject commit 0862007f6ca1f5723c58f10f0ca34f3f25a63b2e
@ -20,7 +20,6 @@ endif()
 set(protobuf_source_dir "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf")
 set(protobuf_binary_dir "${ClickHouse_BINARY_DIR}/contrib/google-protobuf")

-
 add_definitions(-DGOOGLE_PROTOBUF_CMAKE_BUILD)

 add_definitions(-DHAVE_PTHREAD)
@ -30,17 +29,69 @@ include_directories(
     ${protobuf_binary_dir}
     ${protobuf_source_dir}/src)

+add_library(utf8_range
+    ${protobuf_source_dir}/third_party/utf8_range/naive.c
+    ${protobuf_source_dir}/third_party/utf8_range/range2-neon.c
+    ${protobuf_source_dir}/third_party/utf8_range/range2-sse.c
+)
+include_directories(${protobuf_source_dir}/third_party/utf8_range)
+
+add_library(utf8_validity
+    ${protobuf_source_dir}/third_party/utf8_range/utf8_validity.cc
+)
+target_link_libraries(utf8_validity PUBLIC absl::strings)
+
+set(protobuf_absl_used_targets
+    absl::absl_check
+    absl::absl_log
+    absl::algorithm
+    absl::base
+    absl::bind_front
+    absl::bits
+    absl::btree
+    absl::cleanup
+    absl::cord
+    absl::core_headers
+    absl::debugging
+    absl::die_if_null
+    absl::dynamic_annotations
+    absl::flags
+    absl::flat_hash_map
+    absl::flat_hash_set
+    absl::function_ref
+    absl::hash
+    absl::layout
+    absl::log_initialize
+    absl::log_severity
+    absl::memory
+    absl::node_hash_map
+    absl::node_hash_set
+    absl::optional
+    absl::span
+    absl::status
+    absl::statusor
+    absl::strings
+    absl::synchronization
+    absl::time
+    absl::type_traits
+    absl::utility
+    absl::variant
+)
+
 set(libprotobuf_lite_files
     ${protobuf_source_dir}/src/google/protobuf/any_lite.cc
     ${protobuf_source_dir}/src/google/protobuf/arena.cc
+    ${protobuf_source_dir}/src/google/protobuf/arena_align.cc
     ${protobuf_source_dir}/src/google/protobuf/arenastring.cc
+    ${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc
     ${protobuf_source_dir}/src/google/protobuf/extension_set.cc
     ${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc
     ${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc
     ${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc
+    ${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc
     ${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc
     ${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc
-    ${protobuf_source_dir}/src/google/protobuf/io/strtod.cc
     ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc
     ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc
     ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc
@ -48,21 +99,15 @@ set(libprotobuf_lite_files
     ${protobuf_source_dir}/src/google/protobuf/message_lite.cc
     ${protobuf_source_dir}/src/google/protobuf/parse_context.cc
     ${protobuf_source_dir}/src/google/protobuf/repeated_field.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/bytestream.cc
+    ${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc
     ${protobuf_source_dir}/src/google/protobuf/stubs/common.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/int128.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/status.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/statusor.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/stringpiece.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/stringprintf.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/structurally_valid.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/strutil.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/time.cc
     ${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc
 )

 add_library(_libprotobuf-lite ${libprotobuf_lite_files})
-target_link_libraries(_libprotobuf-lite pthread)
+target_link_libraries(_libprotobuf-lite
+    pthread
+    utf8_validity)
 if(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
     target_link_libraries(_libprotobuf-lite log)
 endif()
@ -71,67 +116,93 @@ add_library(protobuf::libprotobuf-lite ALIAS _libprotobuf-lite)


 set(libprotobuf_files
-    ${protobuf_source_dir}/src/google/protobuf/any.cc
     ${protobuf_source_dir}/src/google/protobuf/any.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/api.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/duration.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/empty.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/struct.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/type.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/any.cc
+    ${protobuf_source_dir}/src/google/protobuf/any_lite.cc
+    ${protobuf_source_dir}/src/google/protobuf/arena.cc
+    ${protobuf_source_dir}/src/google/protobuf/arena_align.cc
+    ${protobuf_source_dir}/src/google/protobuf/arenastring.cc
+    ${protobuf_source_dir}/src/google/protobuf/arenaz_sampler.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/importer.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/parser.cc
+    ${protobuf_source_dir}/src/google/protobuf/cpp_features.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/descriptor.cc
     ${protobuf_source_dir}/src/google/protobuf/descriptor.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/descriptor_database.cc
-    ${protobuf_source_dir}/src/google/protobuf/duration.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/dynamic_message.cc
-    ${protobuf_source_dir}/src/google/protobuf/empty.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/extension_set.cc
     ${protobuf_source_dir}/src/google/protobuf/extension_set_heavy.cc
-    ${protobuf_source_dir}/src/google/protobuf/field_mask.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/feature_resolver.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_enum_util.cc
     ${protobuf_source_dir}/src/google/protobuf/generated_message_bases.cc
     ${protobuf_source_dir}/src/google/protobuf/generated_message_reflection.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_full.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_gen.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_message_tctable_lite.cc
+    ${protobuf_source_dir}/src/google/protobuf/generated_message_util.cc
+    ${protobuf_source_dir}/src/google/protobuf/implicit_weak_message.cc
+    ${protobuf_source_dir}/src/google/protobuf/inlined_string_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/coded_stream.cc
     ${protobuf_source_dir}/src/google/protobuf/io/gzip_stream.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/io_win32.cc
     ${protobuf_source_dir}/src/google/protobuf/io/printer.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/strtod.cc
     ${protobuf_source_dir}/src/google/protobuf/io/tokenizer.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_sink.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl.cc
+    ${protobuf_source_dir}/src/google/protobuf/io/zero_copy_stream_impl_lite.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/lexer.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/message_path.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/parser.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/unparser.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/untyped_message.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/writer.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/internal/zero_copy_buffered_stream.cc
+    ${protobuf_source_dir}/src/google/protobuf/json/json.cc
+    ${protobuf_source_dir}/src/google/protobuf/map.cc
     ${protobuf_source_dir}/src/google/protobuf/map_field.cc
     ${protobuf_source_dir}/src/google/protobuf/message.cc
+    ${protobuf_source_dir}/src/google/protobuf/message_lite.cc
+    ${protobuf_source_dir}/src/google/protobuf/parse_context.cc
+    ${protobuf_source_dir}/src/google/protobuf/port.cc
+    ${protobuf_source_dir}/src/google/protobuf/raw_ptr.cc
+    ${protobuf_source_dir}/src/google/protobuf/reflection_mode.cc
     ${protobuf_source_dir}/src/google/protobuf/reflection_ops.cc
+    ${protobuf_source_dir}/src/google/protobuf/repeated_field.cc
     ${protobuf_source_dir}/src/google/protobuf/repeated_ptr_field.cc
     ${protobuf_source_dir}/src/google/protobuf/service.cc
-    ${protobuf_source_dir}/src/google/protobuf/source_context.pb.cc
-    ${protobuf_source_dir}/src/google/protobuf/struct.pb.cc
-    ${protobuf_source_dir}/src/google/protobuf/stubs/substitute.cc
+    ${protobuf_source_dir}/src/google/protobuf/stubs/common.cc
     ${protobuf_source_dir}/src/google/protobuf/text_format.cc
-    ${protobuf_source_dir}/src/google/protobuf/timestamp.pb.cc
-    ${protobuf_source_dir}/src/google/protobuf/type.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/unknown_field_set.cc
     ${protobuf_source_dir}/src/google/protobuf/util/delimited_message_util.cc
     ${protobuf_source_dir}/src/google/protobuf/util/field_comparator.cc
     ${protobuf_source_dir}/src/google/protobuf/util/field_mask_util.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/datapiece.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/default_value_objectwriter.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/error_listener.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/field_mask_utility.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/json_escaping.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/json_objectwriter.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/json_stream_parser.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/object_writer.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/proto_writer.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectsource.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/protostream_objectwriter.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/type_info.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/type_info_test_helper.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/internal/utility.cc
-    ${protobuf_source_dir}/src/google/protobuf/util/json_util.cc
     ${protobuf_source_dir}/src/google/protobuf/util/message_differencer.cc
     ${protobuf_source_dir}/src/google/protobuf/util/time_util.cc
     ${protobuf_source_dir}/src/google/protobuf/util/type_resolver_util.cc
     ${protobuf_source_dir}/src/google/protobuf/wire_format.cc
-    ${protobuf_source_dir}/src/google/protobuf/wrappers.pb.cc
+    ${protobuf_source_dir}/src/google/protobuf/wire_format_lite.cc
 )

 add_library(_libprotobuf ${libprotobuf_lite_files} ${libprotobuf_files})
 if (ENABLE_FUZZING)
     target_compile_options(_libprotobuf PRIVATE "-fsanitize-recover=all")
 endif()
-target_link_libraries(_libprotobuf pthread)
-target_link_libraries(_libprotobuf ch_contrib::zlib)
+target_link_libraries(_libprotobuf
+    pthread
+    ch_contrib::zlib
+    utf8_validity
+    ${protobuf_absl_used_targets})
 if(${CMAKE_SYSTEM_NAME} STREQUAL "Android")
     target_link_libraries(_libprotobuf log)
 endif()
@ -140,23 +211,26 @@ add_library(protobuf::libprotobuf ALIAS _libprotobuf)


 set(libprotoc_files
+    ${protobuf_source_dir}/src/google/protobuf/compiler/allowlists/editions.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/code_generator.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/command_line_interface.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/enum.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/enum_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/extension.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/cord_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/enum_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/map_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/message_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/primitive_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/field_generators/string_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/file.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/generator.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/helpers.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/map_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/message.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/message_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/padding_optimizer.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/parse_function_generator.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/primitive_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/service.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/string_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/cpp/tracker.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_doc_comment.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_enum_field.cc
@ -173,6 +247,7 @@ set(libprotoc_files
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_source_generator_base.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/csharp_wrapper_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/csharp/names.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/context.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/doc_comment.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/enum.cc
@ -195,38 +270,55 @@ set(libprotoc_files
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_field_lite.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_lite.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/java/message_serialization.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/name_resolver.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/java/names.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/primitive_field_lite.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/service.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/shared_code_generator.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/java/string_field_lite.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_enum_field.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_extension.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_field.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_file.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_generator.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_map_field.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_message_field.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_oneof.cc
-    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/enum_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/extension.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/file.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/generator.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/helpers.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/import_writer.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/line_consumer.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/map_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/message_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/names.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/oneof.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/primitive_field.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/objectivec/text_format_decode_data.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/php/names.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/php/php_generator.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/plugin.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/plugin.pb.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/python/generator.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/python/helpers.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/python/pyi_generator.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/retention.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/ruby/ruby_generator.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/accessors.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_bytes.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/accessors/singular_scalar.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/context.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/generator.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/message.cc
+    ${protobuf_source_dir}/src/google/protobuf/compiler/rust/naming.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/subprocess.cc
     ${protobuf_source_dir}/src/google/protobuf/compiler/zip_writer.cc
 )

 add_library(_libprotoc ${libprotoc_files})
-target_link_libraries(_libprotoc _libprotobuf)
+target_link_libraries(_libprotoc
+    _libprotobuf
+    ${protobuf_absl_used_targets})
 add_library(protobuf::libprotoc ALIAS _libprotoc)

 set(protoc_files ${protobuf_source_dir}/src/google/protobuf/compiler/main.cc)
@ -235,7 +327,11 @@ if (CMAKE_HOST_SYSTEM_NAME STREQUAL CMAKE_SYSTEM_NAME
     AND CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL CMAKE_SYSTEM_PROCESSOR)

     add_executable(protoc ${protoc_files})
-    target_link_libraries(protoc _libprotoc _libprotobuf pthread)
+    target_link_libraries(protoc _libprotoc
+        _libprotobuf
+        pthread
+        utf8_validity
+        ${protobuf_absl_used_targets})
     add_executable(protobuf::protoc ALIAS protoc)

 if (ENABLE_FUZZING)
@ -255,6 +351,8 @@ else ()

     # This is quite ugly but I cannot make dependencies work propery.

+    set(abseil_source_dir "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
+
     execute_process(
         COMMAND mkdir -p ${PROTOC_BUILD_DIR}
         COMMAND_ECHO STDOUT)
@ -269,7 +367,9 @@ else ()
         "-Dprotobuf_BUILD_CONFORMANCE=0"
         "-Dprotobuf_BUILD_EXAMPLES=0"
         "-Dprotobuf_BUILD_PROTOC_BINARIES=1"
-        "${protobuf_source_dir}/cmake"
+        "-DABSL_ROOT_DIR=${abseil_source_dir}"
+        "-DABSL_ENABLE_INSTALL=0"
+        "${protobuf_source_dir}"
         WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
         COMMAND_ECHO STDOUT)

@ -278,38 +378,6 @@ else ()
         COMMAND_ECHO STDOUT)
 endif ()

-# add_custom_command (
-#    OUTPUT ${PROTOC_BUILD_DIR}
-#    COMMAND mkdir -p ${PROTOC_BUILD_DIR})
-#
-# add_custom_command (
-#     OUTPUT "${PROTOC_BUILD_DIR}/CMakeCache.txt"
-#
-#     COMMAND ${CMAKE_COMMAND}
-#              -G"${CMAKE_GENERATOR}"
-#              -DCMAKE_MAKE_PROGRAM="${CMAKE_MAKE_PROGRAM}"
-#              -DCMAKE_C_COMPILER="${CMAKE_C_COMPILER}"
-#              -DCMAKE_CXX_COMPILER="${CMAKE_CXX_COMPILER}"
-#              -Dprotobuf_BUILD_TESTS=0
-#              -Dprotobuf_BUILD_CONFORMANCE=0
-#              -Dprotobuf_BUILD_EXAMPLES=0
-#              -Dprotobuf_BUILD_PROTOC_BINARIES=1
-#              "${protobuf_source_dir}/cmake"
-#
-#     DEPENDS "${PROTOC_BUILD_DIR}"
-#     WORKING_DIRECTORY "${PROTOC_BUILD_DIR}"
-#     COMMENT "Configuring 'protoc' for host architecture."
-#     USES_TERMINAL)
-#
-# add_custom_command (
-#     OUTPUT "${PROTOC_BUILD_DIR}/protoc"
-#     COMMAND ${CMAKE_COMMAND} --build "${PROTOC_BUILD_DIR}"
-#     DEPENDS "${PROTOC_BUILD_DIR}/CMakeCache.txt"
-#     COMMENT "Building 'protoc' for host architecture."
-#     USES_TERMINAL)
-#
-# add_custom_target (protoc-host DEPENDS "${PROTOC_BUILD_DIR}/protoc")
-
 add_executable(protoc IMPORTED GLOBAL)
 set_target_properties (protoc PROPERTIES IMPORTED_LOCATION "${PROTOC_BUILD_DIR}/protoc")
 add_dependencies(protoc "${PROTOC_BUILD_DIR}/protoc")
contrib/grpc (vendored submodule)
@ -1 +1 @@
-Subproject commit b723ecae0991bb873fe87a595dfb187178733fde
+Subproject commit 740e3dfd97301a52ad8165b65285bcc149d9e817
@ -1,5 +1,3 @@
-include(CheckCXXCompilerFlag)
-
 set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx")

 set(SRCS
@ -1,6 +1,3 @@
-include(CheckCCompilerFlag)
-include(CheckCXXCompilerFlag)
-
 set(LIBUNWIND_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libunwind")

 set(LIBUNWIND_CXX_SOURCES
@ -48,27 +45,11 @@ target_compile_definitions(unwind PRIVATE -D_LIBUNWIND_NO_HEAP=1 -D_DEBUG -D_LIB
 # and disable sanitizers (otherwise infinite loop may happen)
 target_compile_options(unwind PRIVATE -O3 -fno-exceptions -funwind-tables -fno-sanitize=all $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++ -fno-rtti>)

-check_c_compiler_flag(-Wunused-but-set-variable HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
-if (HAVE_WARNING_UNUSED_BUT_SET_VARIABLE)
-    target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable)
-endif ()
-
-check_cxx_compiler_flag(-Wmissing-attributes HAVE_WARNING_MISSING_ATTRIBUTES)
-if (HAVE_WARNING_MISSING_ATTRIBUTES)
-    target_compile_options(unwind PRIVATE -Wno-missing-attributes)
-endif ()
-
-check_cxx_compiler_flag(-Wmaybe-uninitialized HAVE_WARNING_MAYBE_UNINITIALIZED)
-if (HAVE_WARNING_MAYBE_UNINITIALIZED)
-    target_compile_options(unwind PRIVATE -Wno-maybe-uninitialized)
-endif ()
+target_compile_options(unwind PRIVATE -Wno-unused-but-set-variable)

 # The library is using register variables that are bound to specific registers
 # Example: DwarfInstructions.hpp: register unsigned long long x16 __asm("x16") = cfa;
-check_cxx_compiler_flag(-Wregister HAVE_WARNING_REGISTER)
-if (HAVE_WARNING_REGISTER)
-    target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>")
-endif ()
+target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>")

 install(
     TARGETS unwind
@ -16,8 +16,7 @@ function(GetLibraryVersion _content _outputVar)
     SET(${_outputVar} ${CMAKE_MATCH_1} PARENT_SCOPE)
 endfunction()

-FILE(READ "${QPL_PROJECT_DIR}/CMakeLists.txt" HEADER_CONTENT)
-GetLibraryVersion("${HEADER_CONTENT}" QPL_VERSION)
+set (QPL_VERSION 1.2.0)

 message(STATUS "Intel QPL version: ${QPL_VERSION}")
@ -28,16 +27,422 @@ message(STATUS "Intel QPL version: ${QPL_VERSION}")
 # The qpl submodule comes with its own version of isal. It contains code which does not exist in upstream isal. It would be nice to link
 # only upstream isal (ch_contrib::isal) but at this point we can't.

-include("${QPL_PROJECT_DIR}/cmake/CompileOptions.cmake")
+# ==========================================================================
+# Copyright (C) 2022 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+# ==========================================================================
+
+set(QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS "-fno-exceptions;-fno-rtti")
+
+function(modify_standard_language_flag)
+    # Declaring function parameters
+    set(OPTIONS "")
+    set(ONE_VALUE_ARGS
+        LANGUAGE_NAME
+        FLAG_NAME
+        NEW_FLAG_VALUE)
+    set(MULTI_VALUE_ARGS "")
+
+    # Parsing function parameters
+    cmake_parse_arguments(MODIFY
+                          "${OPTIONS}"
+                          "${ONE_VALUE_ARGS}"
+                          "${MULTI_VALUE_ARGS}"
+                          ${ARGN})
+
+    # Variables
+    set(FLAG_REGULAR_EXPRESSION "${MODIFY_FLAG_NAME}.*[ ]*")
+    set(NEW_VALUE "${MODIFY_FLAG_NAME}${MODIFY_NEW_FLAG_VALUE}")
+
+    # Replacing specified flag with new value
+    string(REGEX REPLACE
+           ${FLAG_REGULAR_EXPRESSION} ${NEW_VALUE}
+           NEW_COMPILE_FLAGS
+           "${CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS}")
+
+    # Returning the value
+    set(CMAKE_${MODIFY_LANGUAGE_NAME}_FLAGS ${NEW_COMPILE_FLAGS} PARENT_SCOPE)
+endfunction()
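A hypothetical invocation (not from the diff) showing how modify_standard_language_flag rewrites a flag inside CMAKE_<LANG>_FLAGS; here it would replace whatever follows an existing "-std=" in CMAKE_CXX_FLAGS with "c++17".

modify_standard_language_flag(LANGUAGE_NAME "CXX"
                              FLAG_NAME "-std="
                              NEW_FLAG_VALUE "c++17")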
+
+function(get_function_name_with_default_bit_width in_function_name bit_width out_function_name)
+    if(in_function_name MATCHES ".*_i")
+        string(REPLACE "_i" "" in_function_name ${in_function_name})
+        set(${out_function_name} "${in_function_name}_${bit_width}_i" PARENT_SCOPE)
+    else()
+        set(${out_function_name} "${in_function_name}_${bit_width}" PARENT_SCOPE)
+    endif()
+endfunction()
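A hypothetical call (not from the diff) illustrating the naming scheme: for an "_i" (in-place) kernel the bit width is inserted before the suffix.

get_function_name_with_default_bit_width("extract_i" "16u" FUNCTION_NAME)
# FUNCTION_NAME is now "extract_16u_i"; plain "extract" would give "extract_16u".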
+
+macro(get_list_of_supported_optimizations PLATFORMS_LIST)
+    list(APPEND PLATFORMS_LIST "")
+    list(APPEND PLATFORMS_LIST "px")
+    list(APPEND PLATFORMS_LIST "avx512")
+endmacro(get_list_of_supported_optimizations)
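A hypothetical follow-up (not from the diff; the directory variable is assumed): the macro fills a caller-side list that the generator function below then iterates over, producing one set of generated dispatch-table sources per optimization target.

get_list_of_supported_optimizations(PLATFORMS_LIST)
# PLATFORMS_LIST now holds "px" (portable) and "avx512";
# generate_unpack_kernel_arrays("${QPL_SRC_DIR}/core-sw" "${PLATFORMS_LIST}") would then
# emit px_*.cpp and avx512_*.cpp table files under core-sw/generated.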
|
|
||||||
|
function(generate_unpack_kernel_arrays current_directory PLATFORMS_LIST)
|
||||||
|
list(APPEND UNPACK_POSTFIX_LIST "")
|
||||||
|
list(APPEND UNPACK_PRLE_POSTFIX_LIST "")
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "")
|
||||||
|
list(APPEND PACK_INDEX_POSTFIX_LIST "")
|
||||||
|
list(APPEND SCAN_POSTFIX_LIST "")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_LIST "")
|
||||||
|
|
||||||
|
#create list of functions that use only 8u 16u 32u postfixes
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "unpack_prle")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "extract_i")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "select_i")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_FUNCTIONS_LIST "expand")
|
||||||
|
|
||||||
|
#create default bit width list
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_LIST "8u")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_LIST "16u")
|
||||||
|
list(APPEND DEFAULT_BIT_WIDTH_LIST "32u")
|
||||||
|
|
||||||
|
#create scan kernel postfixes
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "")
|
||||||
|
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "eq")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "ne")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "lt")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "le")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "gt")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "ge")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "range")
|
||||||
|
list(APPEND SCAN_COMPARATOR_LIST "not_range")
|
||||||
|
|
||||||
|
foreach(SCAN_COMPARATOR IN LISTS SCAN_COMPARATOR_LIST)
|
||||||
|
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_8u")
|
||||||
|
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_16u8u")
|
||||||
|
list(APPEND SCAN_POSTFIX_LIST "_${SCAN_COMPARATOR}_32u8u")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
# create unpack kernel postfixes
|
||||||
|
foreach(input_width RANGE 1 32 1)
|
||||||
|
if(input_width LESS 8 OR input_width EQUAL 8)
|
||||||
|
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u8u")
|
||||||
|
|
||||||
|
elseif(input_width LESS 16 OR input_width EQUAL 16)
|
||||||
|
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u16u")
|
||||||
|
|
||||||
|
else()
|
||||||
|
list(APPEND UNPACK_POSTFIX_LIST "_${input_width}u32u")
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
# create pack kernel postfixes
|
||||||
|
foreach(output_width RANGE 1 8 1)
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_8u${output_width}u")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
foreach(output_width RANGE 9 16 1)
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_16u${output_width}u")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
foreach(output_width RANGE 17 32 1)
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_32u${output_width}u")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_8u16u")
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_8u32u")
|
||||||
|
list(APPEND PACK_POSTFIX_LIST "_16u32u")
|
||||||
|
|
||||||
|
# create pack index kernel postfixes
|
||||||
|
list(APPEND PACK_INDEX_POSTFIX_LIST "_nu")
|
||||||
|
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u")
|
||||||
|
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u16u")
|
||||||
|
list(APPEND PACK_INDEX_POSTFIX_LIST "_8u32u")
|
||||||
|
|
||||||
|
# write to file
|
||||||
|
file(MAKE_DIRECTORY ${current_directory}/generated)
|
||||||
|
|
||||||
|
foreach(PLATFORM_VALUE IN LISTS PLATFORMS_LIST)
|
||||||
|
set(directory "${current_directory}/generated")
|
||||||
|
set(PLATFORM_PREFIX "${PLATFORM_VALUE}_")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Write unpack table
|
||||||
|
#
|
||||||
|
file(WRITE ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"qplc_api.h\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "unpack_table_t ${PLATFORM_PREFIX}unpack_table = {\n")
|
||||||
|
|
||||||
|
#write LE kernels
|
||||||
|
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack${UNPACK_POSTFIX},\n")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
#write BE kernels
|
||||||
|
|
||||||
|
#get last element of the list
|
||||||
|
set(LAST_ELEMENT "")
|
||||||
|
list(GET UNPACK_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||||
|
|
||||||
|
foreach(UNPACK_POSTFIX IN LISTS UNPACK_POSTFIX_LIST)
|
||||||
|
|
||||||
|
if(UNPACK_POSTFIX STREQUAL LAST_ELEMENT)
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX}};\n")
|
||||||
|
else()
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "\t${PLATFORM_PREFIX}qplc_unpack_be${UNPACK_POSTFIX},\n")
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}unpack.cpp "}\n")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Write pack table
|
||||||
|
#
|
||||||
|
file(WRITE ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"qplc_api.h\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "pack_table_t ${PLATFORM_PREFIX}pack_table = {\n")
|
||||||
|
|
||||||
|
#write LE kernels
|
||||||
|
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack${PACK_POSTFIX},\n")
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
#write BE kernels
|
||||||
|
|
||||||
|
#get last element of the list
|
||||||
|
set(LAST_ELEMENT "")
|
||||||
|
list(GET PACK_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||||
|
|
||||||
|
foreach(PACK_POSTFIX IN LISTS PACK_POSTFIX_LIST)
|
||||||
|
|
||||||
|
if(PACK_POSTFIX STREQUAL LAST_ELEMENT)
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX}};\n")
|
||||||
|
else()
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "\t${PLATFORM_PREFIX}qplc_pack_be${PACK_POSTFIX},\n")
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}pack.cpp "}\n")
|
||||||
|
|
||||||
|
#
|
||||||
|
# Write scan table
|
||||||
|
#
|
||||||
|
file(WRITE ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"qplc_api.h\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "namespace qpl::core_sw::dispatcher\n{\n")
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "scan_table_t ${PLATFORM_PREFIX}scan_table = {\n")
|
||||||
|
|
||||||
|
#get last element of the list
|
||||||
|
set(LAST_ELEMENT "")
|
||||||
|
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)
|
||||||
|
|
||||||
|
foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
|
||||||
|
|
||||||
|
if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}};\n")
|
||||||
|
else()
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX},\n")
|
||||||
|
endif()
|
||||||
|
endforeach()
|
||||||
|
|
||||||
|
file(APPEND ${directory}/${PLATFORM_PREFIX}scan.cpp "}\n")
|
||||||
|
|
||||||
|
#
# Write scan_i table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "scan_i_table_t ${PLATFORM_PREFIX}scan_i_table = {\n")

#get last element of the list
set(LAST_ELEMENT "")
list(GET SCAN_POSTFIX_LIST -1 LAST_ELEMENT)

foreach(SCAN_POSTFIX IN LISTS SCAN_POSTFIX_LIST)
    if(SCAN_POSTFIX STREQUAL LAST_ELEMENT)
        file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i};\n")
    else()
        file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "\t${PLATFORM_PREFIX}qplc_scan${SCAN_POSTFIX}_i,\n")
    endif()
endforeach()

file(APPEND ${directory}/${PLATFORM_PREFIX}scan_i.cpp "}\n")
#
# Write pack_index table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "pack_index_table_t ${PLATFORM_PREFIX}pack_index_table = {\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_nu,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u32u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_bits_be_nu,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "\t${PLATFORM_PREFIX}qplc_pack_index_be_8u32u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}pack_index.cpp "}\n")
#
# Write default bit width functions
#
foreach(DEAULT_BIT_WIDTH_FUNCTION IN LISTS DEFAULT_BIT_WIDTH_FUNCTIONS_LIST)
    file(WRITE ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"qplc_api.h\"\n")
    file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
    file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "namespace qpl::core_sw::dispatcher\n{\n")
    file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "${DEAULT_BIT_WIDTH_FUNCTION}_table_t ${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}_table = {\n")

    #get last element of the list
    set(LAST_ELEMENT "")
    list(GET DEFAULT_BIT_WIDTH_LIST -1 LAST_ELEMENT)

    foreach(BIT_WIDTH IN LISTS DEFAULT_BIT_WIDTH_LIST)
        set(FUNCTION_NAME "")
        get_function_name_with_default_bit_width(${DEAULT_BIT_WIDTH_FUNCTION} ${BIT_WIDTH} FUNCTION_NAME)

        if(BIT_WIDTH STREQUAL LAST_ELEMENT)
            file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME}};\n")
        else()
            file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "\t${PLATFORM_PREFIX}qplc_${FUNCTION_NAME},\n")
        endif()
    endforeach()

    file(APPEND ${directory}/${PLATFORM_PREFIX}${DEAULT_BIT_WIDTH_FUNCTION}.cpp "}\n")
endforeach()
#
# Write aggregates table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "aggregates_table_t ${PLATFORM_PREFIX}aggregates_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_bit_aggregates_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "\t${PLATFORM_PREFIX}qplc_aggregates_32u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}aggregates.cpp "}\n")
#
# Write mem_copy functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "memory_copy_table_t ${PLATFORM_PREFIX}memory_copy_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_8u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_16u,\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "\t${PLATFORM_PREFIX}qplc_copy_32u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}memory_copy.cpp "}\n")
#
# Write zero functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "zero_table_t ${PLATFORM_PREFIX}zero_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "\t${PLATFORM_PREFIX}qplc_zero_8u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}zero.cpp "}\n")
#
# Write move functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "move_table_t ${PLATFORM_PREFIX}move_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "\t${PLATFORM_PREFIX}qplc_move_8u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}move.cpp "}\n")
#
# Write crc64 function table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "crc64_table_t ${PLATFORM_PREFIX}crc64_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "\t${PLATFORM_PREFIX}qplc_crc64};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}crc64.cpp "}\n")
#
# Write xor_checksum function table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"qplc_api.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "xor_checksum_table_t ${PLATFORM_PREFIX}xor_checksum_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "\t${PLATFORM_PREFIX}qplc_xor_checksum_8u};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}xor_checksum.cpp "}\n")
#
# Write deflate functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_slow_icf.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_hash_table.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"deflate_histogram.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "deflate_table_t ${PLATFORM_PREFIX}deflate_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_icf_body),\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_histogram_reset),\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}deflate_hash_table_reset)};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}deflate.cpp "}\n")
#
# Write deflate fix functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"deflate_slow.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "deflate_fix_table_t ${PLATFORM_PREFIX}deflate_fix_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}slow_deflate_body)};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}deflate_fix.cpp "}\n")
#
# Write setup_dictionary functions table
#
file(WRITE ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"deflate_slow_utils.h\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "#include \"dispatcher/dispatcher.hpp\"\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "namespace qpl::core_sw::dispatcher\n{\n")
file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "setup_dictionary_table_t ${PLATFORM_PREFIX}setup_dictionary_table = {\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "\t reinterpret_cast<void *>(&${PLATFORM_PREFIX}setup_dictionary)};\n")

file(APPEND ${directory}/${PLATFORM_PREFIX}setup_dictionary.cpp "}\n")

endforeach()
endfunction()
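# For reference, each block above just concatenates string literals into one small
# C++ translation unit per dispatcher table. A sketch of the generated zero-table
# source, assuming a platform prefix such as "px_" (the real prefixes come from the
# PLATFORMS_LIST argument that is defined elsewhere in this file), would look like:
#
#   #include "qplc_api.h"
#   #include "dispatcher/dispatcher.hpp"
#   namespace qpl::core_sw::dispatcher
#   {
#   zero_table_t px_zero_table = {
#       px_qplc_zero_8u};
#   }
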
# check nasm compiler
include(CheckLanguage)
check_language(ASM_NASM)
if(NOT CMAKE_ASM_NASM_COMPILER)
    message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!")
endif()
# [SUBDIR]isal
enable_language(ASM_NASM)

set(ISAL_C_SRC ${QPL_SRC_DIR}/isal/igzip/adler32_base.c
@ -107,11 +512,6 @@ set_target_properties(isal PROPERTIES
    CXX_STANDARD 11
    C_STANDARD 99)

target_compile_options(isal PRIVATE
    "$<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}>"
    "$<$<CONFIG:Debug>:>"
    "$<$<CONFIG:Release>:>")

# AS_FEATURE_LEVEL=10 means "Check SIMD capabilities of the target system at runtime and use up to AVX512 if available".
# HAVE_KNOWS_AVX512 means rely on AVX512 being available on the target system.
target_compile_options(isal_asm PRIVATE "-I${QPL_SRC_DIR}/isal/include/"
@ -164,15 +564,7 @@ foreach(PLATFORM_ID IN LISTS PLATFORMS_LIST)
    PUBLIC $<BUILD_INTERFACE:${QPL_SRC_DIR}/core-sw/src/compression/include>
    PRIVATE $<TARGET_PROPERTY:isal,INTERFACE_INCLUDE_DIRECTORIES>)

set_target_properties(qplcore_${PLATFORM_ID} PROPERTIES
# Set specific compiler options and/or definitions based on a platform
    $<$<C_COMPILER_ID:GNU>:C_STANDARD 17>)

target_compile_options(qplcore_${PLATFORM_ID}
    PRIVATE ${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS}
    PRIVATE "$<$<CONFIG:Debug>:>"
    PRIVATE "$<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>")

# Set specific compiler options and/or definitions based on a platform
if (${PLATFORM_ID} MATCHES "avx512")
    target_compile_definitions(qplcore_${PLATFORM_ID} PRIVATE PLATFORM=2)
    target_compile_options(qplcore_${PLATFORM_ID} PRIVATE -march=skylake-avx512)
@ -221,10 +613,7 @@ set_target_properties(qplcore_sw_dispatcher PROPERTIES CXX_STANDARD 17)
target_compile_definitions(qplcore_sw_dispatcher PUBLIC -DQPL_LIB)

target_compile_options(qplcore_sw_dispatcher
    PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
    PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
    ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
    $<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
    PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)

# [SUBDIR]core-iaa
file(GLOB HW_PATH_SRC ${QPL_SRC_DIR}/core-iaa/sources/aecs/*.c
@ -249,14 +638,6 @@ target_include_directories(core_iaa
    PRIVATE $<BUILD_INTERFACE:${QPL_PROJECT_DIR}/sources/c_api> # own_checkers.h
    PRIVATE $<TARGET_PROPERTY:qplcore_sw_dispatcher,INTERFACE_INCLUDE_DIRECTORIES>)

set_target_properties(core_iaa PROPERTIES
    $<$<C_COMPILER_ID:GNU>:C_STANDARD 17>
    CXX_STANDARD 17)

target_compile_options(core_iaa
    PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
    $<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>)

target_compile_features(core_iaa PRIVATE c_std_11)

target_compile_definitions(core_iaa PRIVATE QPL_BADARG_CHECK
@ -286,10 +667,7 @@ set_property(GLOBAL APPEND PROPERTY QPL_LIB_DEPS
    $<TARGET_OBJECTS:middle_layer_lib>)

target_compile_options(middle_layer_lib
    PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
    PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
    ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
    $<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
    PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)

target_compile_definitions(middle_layer_lib
    PUBLIC QPL_VERSION="${QPL_VERSION}"
@ -324,15 +702,8 @@ target_include_directories(_qpl
    PRIVATE $<TARGET_PROPERTY:middle_layer_lib,INTERFACE_INCLUDE_DIRECTORIES>
    PRIVATE $<BUILD_INTERFACE:${QPL_SRC_DIR}/c_api>)

set_target_properties(_qpl PROPERTIES
    $<$<C_COMPILER_ID:GNU>:C_STANDARD 17>
    CXX_STANDARD 17)

target_compile_options(_qpl
    PRIVATE $<$<C_COMPILER_ID:GNU>:${QPL_LINUX_TOOLCHAIN_REQUIRED_FLAGS};
    PRIVATE ${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS})
    ${QPL_LINUX_TOOLCHAIN_DYNAMIC_LIBRARY_FLAGS};
    $<$<CONFIG:Release>:-O3;-D_FORTIFY_SOURCE=2>>
    PRIVATE $<$<COMPILE_LANG_AND_ID:CXX,GNU>:${QPL_LINUX_TOOLCHAIN_CPP_EMBEDDED_FLAGS}>)

target_compile_definitions(_qpl
    PRIVATE -DQPL_LIB
@ -1,530 +0,0 @@
#!/bin/bash
ckhost="localhost"
ckport=("9000" "9001" "9002" "9003")
WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."
OUTPUT_DIR="${WORKING_DIR}/output"
LOG_DIR="${OUTPUT_DIR}/log"
RAWDATA_DIR="${WORKING_DIR}/rawdata_dir"
database_dir="${WORKING_DIR}/database_dir"
CLIENT_SCRIPTS_DIR="${WORKING_DIR}/client_scripts"
LOG_PACK_FILE="$(date +%Y-%m-%d-%H-%M-%S)"
QUERY_FILE="queries_ssb.sql"
SERVER_BIND_CMD[0]="numactl -m 0 -N 0"
SERVER_BIND_CMD[1]="numactl -m 0 -N 0"
SERVER_BIND_CMD[2]="numactl -m 1 -N 1"
SERVER_BIND_CMD[3]="numactl -m 1 -N 1"
CLIENT_BIND_CMD=""
SSB_GEN_FACTOR=20
TABLE_NAME="lineorder_flat"
TALBE_ROWS="119994608"
CODEC_CONFIG="lz4 deflate zstd"

# define instance number
inst_num=$1
if [ ! -n "$1" ]; then
    echo "Please specify the instance number: 1, 2, 3 or 4"
    exit 1
else
    echo "Benchmarking with instance number:$1"
fi

if [ ! -d "$OUTPUT_DIR" ]; then
    mkdir $OUTPUT_DIR
fi
if [ ! -d "$LOG_DIR" ]; then
    mkdir $LOG_DIR
fi
if [ ! -d "$RAWDATA_DIR" ]; then
    mkdir $RAWDATA_DIR
fi

# define different directories
dir_server=("" "_s2" "_s3" "_s4")
ckreadSql="
CREATE TABLE customer
(
    C_CUSTKEY UInt32,
    C_NAME String,
    C_ADDRESS String,
    C_CITY LowCardinality(String),
    C_NATION LowCardinality(String),
    C_REGION LowCardinality(String),
    C_PHONE String,
    C_MKTSEGMENT LowCardinality(String)
)
ENGINE = MergeTree ORDER BY (C_CUSTKEY);

CREATE TABLE lineorder
(
    LO_ORDERKEY UInt32,
    LO_LINENUMBER UInt8,
    LO_CUSTKEY UInt32,
    LO_PARTKEY UInt32,
    LO_SUPPKEY UInt32,
    LO_ORDERDATE Date,
    LO_ORDERPRIORITY LowCardinality(String),
    LO_SHIPPRIORITY UInt8,
    LO_QUANTITY UInt8,
    LO_EXTENDEDPRICE UInt32,
    LO_ORDTOTALPRICE UInt32,
    LO_DISCOUNT UInt8,
    LO_REVENUE UInt32,
    LO_SUPPLYCOST UInt32,
    LO_TAX UInt8,
    LO_COMMITDATE Date,
    LO_SHIPMODE LowCardinality(String)
)
ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);

CREATE TABLE part
(
    P_PARTKEY UInt32,
    P_NAME String,
    P_MFGR LowCardinality(String),
    P_CATEGORY LowCardinality(String),
    P_BRAND LowCardinality(String),
    P_COLOR LowCardinality(String),
    P_TYPE LowCardinality(String),
    P_SIZE UInt8,
    P_CONTAINER LowCardinality(String)
)
ENGINE = MergeTree ORDER BY P_PARTKEY;

CREATE TABLE supplier
(
    S_SUPPKEY UInt32,
    S_NAME String,
    S_ADDRESS String,
    S_CITY LowCardinality(String),
    S_NATION LowCardinality(String),
    S_REGION LowCardinality(String),
    S_PHONE String
)
ENGINE = MergeTree ORDER BY S_SUPPKEY;
"
supplier_table="
CREATE TABLE supplier
(
    S_SUPPKEY UInt32,
    S_NAME String,
    S_ADDRESS String,
    S_CITY LowCardinality(String),
    S_NATION LowCardinality(String),
    S_REGION LowCardinality(String),
    S_PHONE String
)
ENGINE = MergeTree ORDER BY S_SUPPKEY;
"
part_table="
CREATE TABLE part
(
    P_PARTKEY UInt32,
    P_NAME String,
    P_MFGR LowCardinality(String),
    P_CATEGORY LowCardinality(String),
    P_BRAND LowCardinality(String),
    P_COLOR LowCardinality(String),
    P_TYPE LowCardinality(String),
    P_SIZE UInt8,
    P_CONTAINER LowCardinality(String)
)
ENGINE = MergeTree ORDER BY P_PARTKEY;
"
lineorder_table="
CREATE TABLE lineorder
(
    LO_ORDERKEY UInt32,
    LO_LINENUMBER UInt8,
    LO_CUSTKEY UInt32,
    LO_PARTKEY UInt32,
    LO_SUPPKEY UInt32,
    LO_ORDERDATE Date,
    LO_ORDERPRIORITY LowCardinality(String),
    LO_SHIPPRIORITY UInt8,
    LO_QUANTITY UInt8,
    LO_EXTENDEDPRICE UInt32,
    LO_ORDTOTALPRICE UInt32,
    LO_DISCOUNT UInt8,
    LO_REVENUE UInt32,
    LO_SUPPLYCOST UInt32,
    LO_TAX UInt8,
    LO_COMMITDATE Date,
    LO_SHIPMODE LowCardinality(String)
)
ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY);
"
customer_table="
CREATE TABLE customer
(
    C_CUSTKEY UInt32,
    C_NAME String,
    C_ADDRESS String,
    C_CITY LowCardinality(String),
    C_NATION LowCardinality(String),
    C_REGION LowCardinality(String),
    C_PHONE String,
    C_MKTSEGMENT LowCardinality(String)
)
ENGINE = MergeTree ORDER BY (C_CUSTKEY);
"
lineorder_flat_table="
SET max_memory_usage = 20000000000;
CREATE TABLE lineorder_flat
ENGINE = MergeTree
PARTITION BY toYear(LO_ORDERDATE)
ORDER BY (LO_ORDERDATE, LO_ORDERKEY) AS
SELECT
    l.LO_ORDERKEY AS LO_ORDERKEY,
    l.LO_LINENUMBER AS LO_LINENUMBER,
    l.LO_CUSTKEY AS LO_CUSTKEY,
    l.LO_PARTKEY AS LO_PARTKEY,
    l.LO_SUPPKEY AS LO_SUPPKEY,
    l.LO_ORDERDATE AS LO_ORDERDATE,
    l.LO_ORDERPRIORITY AS LO_ORDERPRIORITY,
    l.LO_SHIPPRIORITY AS LO_SHIPPRIORITY,
    l.LO_QUANTITY AS LO_QUANTITY,
    l.LO_EXTENDEDPRICE AS LO_EXTENDEDPRICE,
    l.LO_ORDTOTALPRICE AS LO_ORDTOTALPRICE,
    l.LO_DISCOUNT AS LO_DISCOUNT,
    l.LO_REVENUE AS LO_REVENUE,
    l.LO_SUPPLYCOST AS LO_SUPPLYCOST,
    l.LO_TAX AS LO_TAX,
    l.LO_COMMITDATE AS LO_COMMITDATE,
    l.LO_SHIPMODE AS LO_SHIPMODE,
    c.C_NAME AS C_NAME,
    c.C_ADDRESS AS C_ADDRESS,
    c.C_CITY AS C_CITY,
    c.C_NATION AS C_NATION,
    c.C_REGION AS C_REGION,
    c.C_PHONE AS C_PHONE,
    c.C_MKTSEGMENT AS C_MKTSEGMENT,
    s.S_NAME AS S_NAME,
    s.S_ADDRESS AS S_ADDRESS,
    s.S_CITY AS S_CITY,
    s.S_NATION AS S_NATION,
    s.S_REGION AS S_REGION,
    s.S_PHONE AS S_PHONE,
    p.P_NAME AS P_NAME,
    p.P_MFGR AS P_MFGR,
    p.P_CATEGORY AS P_CATEGORY,
    p.P_BRAND AS P_BRAND,
    p.P_COLOR AS P_COLOR,
    p.P_TYPE AS P_TYPE,
    p.P_SIZE AS P_SIZE,
    p.P_CONTAINER AS P_CONTAINER
FROM lineorder AS l
INNER JOIN customer AS c ON c.C_CUSTKEY = l.LO_CUSTKEY
INNER JOIN supplier AS s ON s.S_SUPPKEY = l.LO_SUPPKEY
INNER JOIN part AS p ON p.P_PARTKEY = l.LO_PARTKEY;
show settings ilike 'max_memory_usage';
"
function insert_data(){
    echo "insert_data:$1"
    create_table_prefix="clickhouse client --host ${ckhost} --port $2 --multiquery -q"
    insert_data_prefix="clickhouse client --query "
    case $1 in
    all)
        clickhouse client --host ${ckhost} --port $2 --multiquery -q"$ckreadSql" && {
            ${insert_data_prefix} "INSERT INTO customer FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/customer.tbl --port=$2
            ${insert_data_prefix} "INSERT INTO part FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/part.tbl --port=$2
            ${insert_data_prefix} "INSERT INTO supplier FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl --port=$2
            ${insert_data_prefix} "INSERT INTO lineorder FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl --port=$2
        }
        ${create_table_prefix}"${lineorder_flat_table}"
        ;;
    customer)
        echo ${create_table_prefix}\"${customer_table}\"
        ${create_table_prefix}"${customer_table}" && {
            echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
            ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
        }
        ;;
    part)
        echo ${create_table_prefix}\"${part_table}\"
        ${create_table_prefix}"${part_table}" && {
            echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
            ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
        }
        ;;
    supplier)
        echo ${create_table_prefix}"${supplier_table}"
        ${create_table_prefix}"${supplier_table}" && {
            echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
            ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
        }
        ;;
    lineorder)
        echo ${create_table_prefix}"${lineorder_table}"
        ${create_table_prefix}"${lineorder_table}" && {
            echo "${insert_data_prefix} \"INSERT INTO $1 FORMAT CSV\" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2"
            ${insert_data_prefix} "INSERT INTO $1 FORMAT CSV" < ${RAWDATA_DIR}/ssb-dbgen/$1.tbl --port=$2
        }
        ;;
    lineorder_flat)
        echo ${create_table_prefix}"${lineorder_flat_table}"
        ${create_table_prefix}"${lineorder_flat_table}"
        return 0
        ;;
    *)
        exit 0
        ;;
    esac
}
function check_sql(){
    select_sql="select * from "$1" limit 1"
    clickhouse client --host ${ckhost} --port $2 --multiquery -q"${select_sql}"
}
function check_table(){
    checknum=0
    source_tables="customer part supplier lineorder lineorder_flat"
    test_tables=${1:-${source_tables}}
    echo "Checking table data required in server..."
    for i in $(seq 0 $[inst_num-1])
    do
        for j in `echo ${test_tables}`
        do
            check_sql $j ${ckport[i]} &> /dev/null || {
                let checknum+=1 && insert_data "$j" ${ckport[i]}
            }
        done
    done

    for i in $(seq 0 $[inst_num-1])
    do
        echo "clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q\"select count() from ${TABLE_NAME};\""
        var=$(clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"select count() from ${TABLE_NAME};")
        if [ $var -eq $TALBE_ROWS ];then
            echo "Instance_${i} Table data integrity check OK -> Rows:$var"
        else
            echo "Instance_${i} Table data integrity check Failed -> Rows:$var"
            exit 1
        fi
    done
    if [ $checknum -gt 0 ];then
        echo "Need sleep 10s after first table data insertion...$checknum"
        sleep 10
    fi
}
function check_instance(){
    instance_alive=0
    for i in {1..10}
    do
        sleep 1
        netstat -nltp | grep ${1} > /dev/null
        if [ $? -ne 1 ];then
            instance_alive=1
            break
        fi
    done

    if [ $instance_alive -eq 0 ];then
        echo "check_instance -> clickhouse server instance failed to launch due to 10s timeout!"
        exit 1
    else
        echo "check_instance -> clickhouse server instance launched successfully!"
    fi
}
function start_clickhouse_for_insertion(){
    echo "start_clickhouse_for_insertion"
    for i in $(seq 0 $[inst_num-1])
    do
        echo "cd ${database_dir}/$1${dir_server[i]}"
        echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null"

        cd ${database_dir}/$1${dir_server[i]}
        ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&${LOG_DIR}/${1}_${i}_server_log& > /dev/null
        check_instance ${ckport[i]}
    done
}
function start_clickhouse_for_stressing(){
    echo "start_clickhouse_for_stressing"
    for i in $(seq 0 $[inst_num-1])
    do
        echo "cd ${database_dir}/$1${dir_server[i]}"
        echo "${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&"

        cd ${database_dir}/$1${dir_server[i]}
        ${SERVER_BIND_CMD[i]} clickhouse server -C config_${1}${dir_server[i]}.xml >&/dev/null&
        check_instance ${ckport[i]}
    done
}
yum -y install git make gcc sudo net-tools &> /dev/null
pip3 install clickhouse_driver numpy &> /dev/null
test -d ${RAWDATA_DIR}/ssb-dbgen || git clone https://github.com/vadimtk/ssb-dbgen.git ${RAWDATA_DIR}/ssb-dbgen && cd ${RAWDATA_DIR}/ssb-dbgen

if [ ! -f ${RAWDATA_DIR}/ssb-dbgen/dbgen ];then
    make && {
        test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y |./dbgen -s ${SSB_GEN_FACTOR} -T c
        test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p
        test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s
        test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d
        test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l
    }
else
    test -f ${RAWDATA_DIR}/ssb-dbgen/customer.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T c
    test -f ${RAWDATA_DIR}/ssb-dbgen/part.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T p
    test -f ${RAWDATA_DIR}/ssb-dbgen/supplier.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T s
    test -f ${RAWDATA_DIR}/ssb-dbgen/date.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T d
    test -f ${RAWDATA_DIR}/ssb-dbgen/lineorder.tbl || echo y | ./dbgen -s ${SSB_GEN_FACTOR} -T l
fi

filenum=`find ${RAWDATA_DIR}/ssb-dbgen/ -name "*.tbl" | wc -l`

if [ $filenum -ne 5 ];then
    echo "generating ssb data files (*.tbl) failed"
    exit 1
fi
function kill_instance(){
    instance_alive=1
    for i in {1..2}
    do
        pkill clickhouse && sleep 5
        instance_alive=0
        for i in $(seq 0 $[inst_num-1])
        do
            netstat -nltp | grep ${ckport[i]} > /dev/null
            if [ $? -ne 1 ];then
                instance_alive=1
                break;
            fi
        done
        if [ $instance_alive -eq 0 ];then
            break;
        fi
    done
    if [ $instance_alive -eq 0 ];then
        echo "kill_instance OK!"
    else
        echo "kill_instance Failed -> clickhouse server instance still alive due to 10s timeout"
        exit 1
    fi
}
function run_test(){
    is_xml=0
    for i in $(seq 0 $[inst_num-1])
    do
        if [ -f ${database_dir}/${1}${dir_server[i]}/config_${1}${dir_server[i]}.xml ]; then
            is_xml=$[is_xml+1]
        fi
    done
    if [ $is_xml -eq $inst_num ];then
        echo "Benchmark with $inst_num instance"
        start_clickhouse_for_insertion ${1}

        for i in $(seq 0 $[inst_num-1])
        do
            clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null
        done

        if [ $? -eq 0 ];then
            check_table
        fi
        kill_instance

        if [ $1 == "deflate" ];then
            test -f ${LOG_DIR}/${1}_server_log && deflatemsg=`cat ${LOG_DIR}/${1}_server_log | grep DeflateJobHWPool`
            if [ -n "$deflatemsg" ];then
                echo ------------------------------------------------------
                echo $deflatemsg
                echo ------------------------------------------------------
            fi
        fi
        echo "Check table data required in server_${1} -> Done! "

        start_clickhouse_for_stressing ${1}
        for i in $(seq 0 $[inst_num-1])
        do
            clickhouse client --host ${ckhost} --port ${ckport[i]} -m -q"show databases;" >/dev/null
        done
        if [ $? -eq 0 ];then
            test -d ${CLIENT_SCRIPTS_DIR} && cd ${CLIENT_SCRIPTS_DIR}
            echo "Client stressing... "
            echo "${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log"
            ${CLIENT_BIND_CMD} python3 client_stressing_test.py ${QUERY_FILE} $inst_num &> ${LOG_DIR}/${1}.log
            echo "Completed client stressing, checking log... "
            finish_log=`grep "Finished" ${LOG_DIR}/${1}.log | wc -l`
            if [ $finish_log -eq 1 ] ;then
                kill_instance
                test -f ${LOG_DIR}/${1}.log && echo "${1}.log ===> ${LOG_DIR}/${1}.log"
            else
                kill_instance
                echo "'Finished' not found in client log -> Performance test may fail"
                exit 1
            fi
        else
            echo "${1} clickhouse server failed to start"
            exit 1
        fi
    else
        echo "clickhouse server failed to start -> Please check xml files required in ${database_dir} for each instance"
        exit 1
    fi
}
function clear_log(){
    if [ -d "$LOG_DIR" ]; then
        cd ${LOG_DIR} && rm -rf *
    fi
}
function gather_log_for_codec(){
    cd ${OUTPUT_DIR} && mkdir -p ${LOG_PACK_FILE}/${1}
    cp -rf ${LOG_DIR} ${OUTPUT_DIR}/${LOG_PACK_FILE}/${1}
}
function pack_log(){
    if [ -e "${OUTPUT_DIR}/run.log" ]; then
        cp ${OUTPUT_DIR}/run.log ${OUTPUT_DIR}/${LOG_PACK_FILE}/
    fi
    echo "Please check all log information in ${OUTPUT_DIR}/${LOG_PACK_FILE}"
}
function setup_check(){
    iax_dev_num=`accel-config list | grep iax | wc -l`
    if [ $iax_dev_num -eq 0 ] ;then
        iax_dev_num=`accel-config list | grep iax | wc -l`
        if [ $iax_dev_num -eq 0 ] ;then
            echo "No IAA devices available -> Please check IAA hardware setup manually!"
            exit 1
        else
            echo "IAA enabled devices number:$iax_dev_num"
        fi
    else
        echo "IAA enabled devices number:$iax_dev_num"
    fi
    libaccel_version=`accel-config -v`
    clickhouser_version=`clickhouse server --version`
    kernel_dxd_log=`dmesg | grep dxd`
    echo "libaccel_version:$libaccel_version"
    echo "clickhouser_version:$clickhouser_version"
    echo -e "idxd section in kernel log:\n$kernel_dxd_log"
}
setup_check

export CLICKHOUSE_WATCHDOG_ENABLE=0

for i in ${CODEC_CONFIG[@]}
do
    clear_log
    codec=${i}
    echo "run test------------$codec"
    run_test $codec
    gather_log_for_codec $codec
done

pack_log
echo "Done."
@ -1,278 +0,0 @@
from operator import eq
import os
import random
import time
import sys
from clickhouse_driver import Client
import numpy as np
import subprocess
import multiprocessing
from multiprocessing import Manager

warmup_runs = 10
calculated_runs = 10
seconds = 30
max_instances_number = 8
retest_number = 3
retest_tolerance = 10
def checkInt(str):
    try:
        int(str)
        return True
    except ValueError:
        return False
def setup_client(index):
    if index < 4:
        port_idx = index
    else:
        port_idx = index + 4
    client = Client(
        host="localhost",
        database="default",
        user="default",
        password="",
        port="900%d" % port_idx,
    )
    union_mode_query = "SET union_default_mode='DISTINCT'"
    client.execute(union_mode_query)
    return client
def warm_client(clientN, clientL, query, loop):
    for c_idx in range(clientN):
        for _ in range(loop):
            clientL[c_idx].execute(query)
def read_queries(queries_list):
    queries = list()
    queries_id = list()
    with open(queries_list, "r") as f:
        for line in f:
            line = line.rstrip()
            line = line.split("$")
            queries_id.append(line[0])
            queries.append(line[1])
    return queries_id, queries
def run_task(client, cname, query, loop, query_latency):
    start_time = time.time()
    for i in range(loop):
        client.execute(query)
        query_latency.append(client.last_query.elapsed)

    end_time = time.time()
    p95 = np.percentile(query_latency, 95)
    print(
        "CLIENT: {0} end. -> P95: %f, qps: %f".format(cname)
        % (p95, loop / (end_time - start_time))
    )
def run_multi_clients(clientN, clientList, query, loop):
    client_pids = {}
    start_time = time.time()
    manager = multiprocessing.Manager()
    query_latency_list0 = manager.list()
    query_latency_list1 = manager.list()
    query_latency_list2 = manager.list()
    query_latency_list3 = manager.list()
    query_latency_list4 = manager.list()
    query_latency_list5 = manager.list()
    query_latency_list6 = manager.list()
    query_latency_list7 = manager.list()

    for c_idx in range(clientN):
        client_name = "Role_%d" % c_idx
        if c_idx == 0:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list0),
            )
        elif c_idx == 1:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list1),
            )
        elif c_idx == 2:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list2),
            )
        elif c_idx == 3:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list3),
            )
        elif c_idx == 4:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list4),
            )
        elif c_idx == 5:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list5),
            )
        elif c_idx == 6:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list6),
            )
        elif c_idx == 7:
            client_pids[c_idx] = multiprocessing.Process(
                target=run_task,
                args=(clientList[c_idx], client_name, query, loop, query_latency_list7),
            )
        else:
            print("ERROR: CLIENT number mismatch!!")
            exit()
        print("CLIENT: %s start" % client_name)
        client_pids[c_idx].start()

    for c_idx in range(clientN):
        client_pids[c_idx].join()
    end_time = time.time()
    totalT = end_time - start_time

    query_latencyTotal = list()
    for item in query_latency_list0:
        query_latencyTotal.append(item)
    for item in query_latency_list1:
        query_latencyTotal.append(item)
    for item in query_latency_list2:
        query_latencyTotal.append(item)
    for item in query_latency_list3:
        query_latencyTotal.append(item)
    for item in query_latency_list4:
        query_latencyTotal.append(item)
    for item in query_latency_list5:
        query_latencyTotal.append(item)
    for item in query_latency_list6:
        query_latencyTotal.append(item)
    for item in query_latency_list7:
        query_latencyTotal.append(item)

    totalP95 = np.percentile(query_latencyTotal, 95) * 1000
    return totalT, totalP95
def run_task_caculated(client, cname, query, loop):
    query_latency = list()
    start_time = time.time()
    for i in range(loop):
        client.execute(query)
        query_latency.append(client.last_query.elapsed)
    end_time = time.time()
    p95 = np.percentile(query_latency, 95)
def run_multi_clients_caculated(clientN, clientList, query, loop):
    client_pids = {}
    start_time = time.time()
    for c_idx in range(clientN):
        client_name = "Role_%d" % c_idx
        client_pids[c_idx] = multiprocessing.Process(
            target=run_task_caculated,
            args=(clientList[c_idx], client_name, query, loop),
        )
        client_pids[c_idx].start()
    for c_idx in range(clientN):
        client_pids[c_idx].join()
    end_time = time.time()
    totalT = end_time - start_time
    return totalT
if __name__ == "__main__":
    client_number = 1
    queries = list()
    queries_id = list()

    if len(sys.argv) != 3:
        print(
            "usage: python3 client_stressing_test.py [queries_file_path] [client_number]"
        )
        sys.exit()
    else:
        queries_list = sys.argv[1]
        client_number = int(sys.argv[2])
        print(
            "queries_file_path: %s, client_number: %d" % (queries_list, client_number)
        )
        if not os.path.isfile(queries_list) or not os.access(queries_list, os.R_OK):
            print("please check the right path for queries file")
            sys.exit()
        if (
            not checkInt(sys.argv[2])
            or int(sys.argv[2]) > max_instances_number
            or int(sys.argv[2]) < 1
        ):
            print("client_number should be in [1~%d]" % max_instances_number)
            sys.exit()

    client_list = {}
    queries_id, queries = read_queries(queries_list)

    for c_idx in range(client_number):
        client_list[c_idx] = setup_client(c_idx)
    # clear cache
    os.system("sync; echo 3 > /proc/sys/vm/drop_caches")

    print("###Pilot Run Begin")
    for i in queries:
        warm_client(client_number, client_list, i, 1)
    print("###Pilot Run End -> Start stressing....")

    query_index = 0
    for q in queries:
        print(
            "\n###START -> Index: %d, ID: %s, Query: %s"
            % (query_index, queries_id[query_index], q)
        )
        warm_client(client_number, client_list, q, warmup_runs)
        print("###Warm Done!")
        for j in range(0, retest_number):
            totalT = run_multi_clients_caculated(
                client_number, client_list, q, calculated_runs
            )
            curr_loop = int(seconds * calculated_runs / totalT) + 1
            print(
                "###Calculation Done! -> loopN: %d, expected seconds:%d"
                % (curr_loop, seconds)
            )

            print("###Stress Running! -> %d iterations......" % curr_loop)

            totalT, totalP95 = run_multi_clients(
                client_number, client_list, q, curr_loop
            )

            if totalT > (seconds - retest_tolerance) and totalT < (
                seconds + retest_tolerance
            ):
                break
            else:
                print(
                    "###totalT:%d is far away from expected seconds:%d. Run again ->j:%d!"
                    % (totalT, seconds, j)
                )

        print(
            "###Completed! -> ID: %s, clientN: %d, totalT: %.2f s, latencyAVG: %.2f ms, P95: %.2f ms, QPS_Final: %.2f"
            % (
                queries_id[query_index],
                client_number,
                totalT,
                totalT * 1000 / (curr_loop * client_number),
                totalP95,
                ((curr_loop * client_number) / totalT),
            )
        )
        query_index += 1
    print("###Finished!")
@ -1,10 +0,0 @@
Q1.1$SELECT sum(LO_EXTENDEDPRICE * LO_DISCOUNT) AS revenue FROM lineorder_flat WHERE toYear(LO_ORDERDATE) = 1993 AND LO_DISCOUNT BETWEEN 1 AND 3 AND LO_QUANTITY < 25;
Q2.1$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_CATEGORY = 'MFGR#12' AND S_REGION = 'AMERICA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND;
Q2.2$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND >= 'MFGR#2221' AND P_BRAND <= 'MFGR#2228' AND S_REGION = 'ASIA' GROUP BY year,P_BRAND ORDER BY year,P_BRAND;
Q2.3$SELECT sum(LO_REVENUE),toYear(LO_ORDERDATE) AS year,P_BRAND FROM lineorder_flat WHERE P_BRAND = 'MFGR#2239' AND S_REGION = 'EUROPE' GROUP BY year,P_BRAND ORDER BY year,P_BRAND;
Q3.1$SELECT C_NATION,S_NATION,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_REGION = 'ASIA' AND S_REGION = 'ASIA' AND year >= 1992 AND year <= 1997 GROUP BY C_NATION,S_NATION,year ORDER BY year ASC,revenue DESC;
Q3.2$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE C_NATION = 'UNITED STATES' AND S_NATION = 'UNITED STATES' AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC;
Q3.3$SELECT C_CITY,S_CITY,toYear(LO_ORDERDATE) AS year,sum(LO_REVENUE) AS revenue FROM lineorder_flat WHERE (C_CITY = 'UNITED KI1' OR C_CITY = 'UNITED KI5') AND (S_CITY = 'UNITED KI1' OR S_CITY = 'UNITED KI5') AND year >= 1992 AND year <= 1997 GROUP BY C_CITY,S_CITY,year ORDER BY year ASC,revenue DESC;
Q4.1$SELECT toYear(LO_ORDERDATE) AS year,C_NATION,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,C_NATION ORDER BY year ASC,C_NATION ASC;
Q4.2$SELECT toYear(LO_ORDERDATE) AS year,S_NATION,P_CATEGORY,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE C_REGION = 'AMERICA' AND S_REGION = 'AMERICA' AND (year = 1997 OR year = 1998) AND (P_MFGR = 'MFGR#1' OR P_MFGR = 'MFGR#2') GROUP BY year,S_NATION,P_CATEGORY ORDER BY year ASC,S_NATION ASC,P_CATEGORY ASC;
Q4.3$SELECT toYear(LO_ORDERDATE) AS year,S_CITY,P_BRAND,sum(LO_REVENUE - LO_SUPPLYCOST) AS profit FROM lineorder_flat WHERE S_NATION = 'UNITED STATES' AND (year = 1997 OR year = 1998) AND P_CATEGORY = 'MFGR#14' GROUP BY year,S_CITY,P_BRAND ORDER BY year ASC,S_CITY ASC,P_BRAND ASC;
@ -1,6 +0,0 @@
WORKING_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.."
if [ ! -d "${WORKING_DIR}/output" ]; then
    mkdir ${WORKING_DIR}/output
fi
bash allin1_ssb.sh 2 > ${WORKING_DIR}/output/run.log
echo "Please check log in: ${WORKING_DIR}/output/run.log"
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
     Do not edit it: it is likely to be discarded and generated again before it's read next time.
     Files used to generate this file:
     config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <mysql_port>9004</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>deflate_qpl</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>

            <networks>
                <ip>::/0</ip>
            </networks>

            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
     Do not edit it: it is likely to be discarded and generated again before it's read next time.
     Files used to generate this file:
     config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8124</http_port>
    <tcp_port>9001</tcp_port>
    <mysql_port>9005</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>deflate_qpl</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>

            <networks>
                <ip>::/0</ip>
            </networks>

            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
     Do not edit it: it is likely to be discarded and generated again before it's read next time.
     Files used to generate this file:
     config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <mysql_port>9004</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>lz4</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>

            <networks>
                <ip>::/0</ip>
            </networks>

            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
     Do not edit it: it is likely to be discarded and generated again before it's read next time.
     Files used to generate this file:
     config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8124</http_port>
    <tcp_port>9001</tcp_port>
    <mysql_port>9005</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>lz4</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>

            <networks>
                <ip>::/0</ip>
            </networks>

            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
 Do not edit it: it is likely to be discarded and generated again before it's read next time.
 Files used to generate this file:
 config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>
    <mysql_port>9004</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>zstd</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>
            <networks>
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -1,49 +0,0 @@
<!-- This file was generated automatically.
 Do not edit it: it is likely to be discarded and generated again before it's read next time.
 Files used to generate this file:
 config.xml -->

<!-- Config that is used when server is run without config file. --><clickhouse>
    <logger>
        <level>trace</level>
        <console>true</console>
    </logger>

    <http_port>8124</http_port>
    <tcp_port>9001</tcp_port>
    <mysql_port>9005</mysql_port>

    <path>./</path>

    <uncompressed_cache_size>8589934592</uncompressed_cache_size>
    <mark_cache_size>5368709120</mark_cache_size>
    <mlock_executable>true</mlock_executable>

    <compression>
        <case>
            <method>zstd</method>
        </case>
    </compression>

    <users>
        <default>
            <password/>
            <networks>
                <ip>::/0</ip>
            </networks>
            <profile>default</profile>
            <quota>default</quota>
            <access_management>1</access_management>
        </default>
    </users>

    <profiles>
        <default/>
    </profiles>

    <quotas>
        <default/>
    </quotas>
</clickhouse>
@ -27,6 +27,17 @@ set(RE2_SOURCES

 add_library(_re2 ${RE2_SOURCES})
 target_include_directories(_re2 PUBLIC "${SRC_DIR}")
-target_link_libraries(_re2 ch_contrib::abseil_str_format)
+target_link_libraries(_re2 PRIVATE
+    absl::base
+    absl::core_headers
+    absl::fixed_array
+    absl::flat_hash_map
+    absl::flat_hash_set
+    absl::inlined_vector
+    absl::strings
+    absl::str_format
+    absl::synchronization
+    absl::optional
+    absl::span)

 add_library(ch_contrib::re2 ALIAS _re2)
@ -76,7 +76,6 @@ else()
   endif()
 endif()

-include(CheckCCompilerFlag)
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
   if(POWER9)
     set(HAS_POWER9 1)
@ -88,26 +87,15 @@ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
 endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")

 if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
-  CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC)
-  if(HAS_ARMV8_CRC)
-    message(STATUS " HAS_ARMV8_CRC yes")
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
-  endif(HAS_ARMV8_CRC)
+  set(HAS_ARMV8_CRC 1)
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
 endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")

-include(CheckCXXSourceCompiles)
-if(NOT MSVC)
-  set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul")
-endif()
-unset(CMAKE_REQUIRED_FLAGS)
-if(HAVE_SSE42)
+if(ENABLE_AVX2 AND ENABLE_PCLMULQDQ)
   add_definitions(-DHAVE_SSE42)
   add_definitions(-DHAVE_PCLMUL)
-elseif(FORCE_SSE42)
-  message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
 endif()

 set (HAVE_THREAD_LOCAL 1)
@ -121,75 +109,18 @@ elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
   add_definitions(-DOS_LINUX)
 elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
   add_definitions(-DOS_SOLARIS)
-elseif(CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
-  add_definitions(-DOS_GNU_KFREEBSD)
 elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
   add_definitions(-DOS_FREEBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD")
-  add_definitions(-DOS_NETBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
-  add_definitions(-DOS_OPENBSD)
-elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly")
-  add_definitions(-DOS_DRAGONFLYBSD)
 elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
   add_definitions(-DOS_ANDROID)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
-  add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX)
-  if(MINGW)
-    add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_VISTA)
-  endif()
 endif()

-if(NOT WIN32)
-  add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX)
-endif()
+add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX)

-option(WITH_FALLOCATE "build with fallocate" ON)
-if(WITH_FALLOCATE)
-  CHECK_C_SOURCE_COMPILES("
-#include <fcntl.h>
-#include <linux/falloc.h>
-int main() {
- int fd = open(\"/dev/null\", 0);
- fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024);
-}
-" HAVE_FALLOCATE)
-  if(HAVE_FALLOCATE)
-    add_definitions(-DROCKSDB_FALLOCATE_PRESENT)
-  endif()
-endif()
-
-CHECK_C_SOURCE_COMPILES("
-#include <fcntl.h>
-int main() {
- int fd = open(\"/dev/null\", 0);
- sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE);
-}
-" HAVE_SYNC_FILE_RANGE_WRITE)
-if(HAVE_SYNC_FILE_RANGE_WRITE)
-  add_definitions(-DROCKSDB_RANGESYNC_PRESENT)
-endif()
-
-CHECK_C_SOURCE_COMPILES("
-#include <pthread.h>
-int main() {
- (void) PTHREAD_MUTEX_ADAPTIVE_NP;
-}
-" HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
-if(HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
+if (OS_LINUX OR OS_FREEBSD)
   add_definitions(-DROCKSDB_PTHREAD_ADAPTIVE_MUTEX)
 endif()

-include(CheckCXXSymbolExists)
-if (OS_FREEBSD)
-  check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc_np.h" HAVE_MALLOC_USABLE_SIZE)
-else()
-  check_cxx_symbol_exists(malloc_usable_size "${ROCKSDB_SOURCE_DIR}/malloc.h" HAVE_MALLOC_USABLE_SIZE)
-endif()
-if(HAVE_MALLOC_USABLE_SIZE)
-  add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE)
-endif()
-
 if (OS_LINUX)
   add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
   add_definitions(-DROCKSDB_AUXV_SYSAUXV_PRESENT)
@ -204,7 +135,6 @@ include_directories("${ROCKSDB_SOURCE_DIR}/include")
 if(WITH_FOLLY_DISTRIBUTED_MUTEX)
   include_directories("${ROCKSDB_SOURCE_DIR}/third-party/folly")
 endif()
-find_package(Threads REQUIRED)

 # Main library source code

@ -497,7 +427,7 @@ set(SOURCES
   ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/range/range_tree/lib/util/memarena.cc
   rocksdb_build_version.cc)

-if(HAVE_SSE42 AND NOT MSVC)
+if(ENABLE_SSE42 AND ENABLE_PCLMULQDQ)
   set_source_files_properties(
     "${ROCKSDB_SOURCE_DIR}/util/crc32c.cc"
     PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
@ -47,8 +47,6 @@ set(thriftcpp_threads_SOURCES
     "${LIBRARY_DIR}/src/thrift/concurrency/Mutex.cpp"
 )

-include("${ClickHouse_SOURCE_DIR}/contrib/thrift/build/cmake/ConfigureChecks.cmake") # makes config.h
-
 set (HAVE_ARPA_INET_H 1)
 set (HAVE_FCNTL_H 1)
 set (HAVE_GETOPT_H 1)
@ -81,10 +79,6 @@ if (OS_LINUX AND NOT USE_MUSL)
     set (STRERROR_R_CHAR_P 1)
 endif ()

-#set(PACKAGE ${PACKAGE_NAME})
-#set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}")
-#set(VERSION ${thrift_VERSION})
-
 # generate a config.h file
 configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build/cmake/config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/thrift/config.h")
@ -98,8 +98,6 @@ if (ARCH_S390X)
     add_compile_definitions(WORDS_BIGENDIAN)
 endif ()

-find_package(Threads REQUIRED)
-
 add_library(_liblzma
     ${SRC_DIR}/src/common/mythread.h
@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.10.3.5"
+ARG VERSION="23.10.4.25"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@ -6,29 +6,27 @@ FROM clickhouse/test-util:latest AS cctools
 ENV CC=clang-${LLVM_VERSION}
 ENV CXX=clang++-${LLVM_VERSION}
 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# DO NOT PUT ANYTHING BEFORE THREE NEXT `RUN` DIRECTIVES
+# DO NOT PUT ANYTHING BEFORE THE NEXT TWO `RUN` DIRECTIVES
 # THE MOST HEAVY OPERATION MUST BE THE FIRST IN THE CACHE
 # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 # libtapi is required to support .tbh format from recent MacOS SDKs
-RUN git clone --depth 1 https://github.com/tpoechtrager/apple-libtapi.git \
+RUN git clone https://github.com/tpoechtrager/apple-libtapi.git \
     && cd apple-libtapi \
+    && git checkout 15dfc2a8c9a2a89d06ff227560a69f5265b692f9 \
     && INSTALLPREFIX=/cctools ./build.sh \
     && ./install.sh \
     && cd .. \
     && rm -rf apple-libtapi

 # Build and install tools for cross-linking to Darwin (x86-64)
-RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \
+# Build and install tools for cross-linking to Darwin (aarch64)
+RUN git clone https://github.com/tpoechtrager/cctools-port.git \
     && cd cctools-port/cctools \
+    && git checkout 2a3e1c2a6ff54a30f898b70cfb9ba1692a55fad7 \
     && ./configure --prefix=/cctools --with-libtapi=/cctools \
        --target=x86_64-apple-darwin \
     && make install -j$(nproc) \
-    && cd ../.. \
-    && rm -rf cctools-port
-
-# Build and install tools for cross-linking to Darwin (aarch64)
-RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \
-    && cd cctools-port/cctools \
+    && make clean \
     && ./configure --prefix=/cctools --with-libtapi=/cctools \
        --target=aarch64-apple-darwin \
     && make install -j$(nproc) \
@ -62,19 +60,12 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
     rustup target add aarch64-unknown-linux-musl && \
     rustup target add riscv64gc-unknown-linux-gnu

-# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
 # A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
 RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
     && apt-get update \
     && apt-get install --yes \
         binutils-riscv64-linux-gnu \
         build-essential \
-        g++-11 \
-        gcc-11 \
-        gcc-aarch64-linux-gnu \
-        libc6 \
-        libc6-dev \
-        libc6-dev-arm64-cross \
         python3-boto3 \
         yasm \
         zstd \
@ -22,6 +22,7 @@ if [ "$EXTRACT_TOOLCHAIN_DARWIN" = "1" ]; then
     fi
 fi

+
 # Uncomment to debug ccache. Don't put ccache log in /output right away, or it
 # will be confusingly packed into the "performance" package.
 # export CCACHE_LOGFILE=/build/ccache.log
@ -32,6 +33,16 @@ mkdir -p /build/build_docker
 cd /build/build_docker
 rm -f CMakeCache.txt

+# We don't want to depend on any third-party CMake files.
+# To check it, find and delete them.
+grep -o -P '"contrib/[^"]+"' ../.gitmodules |
+  grep -v -P 'llvm-project|google-protobuf|grpc|abseil-cpp|corrosion' |
+  xargs -I@ find ../@ -'(' -name 'CMakeLists.txt' -or -name '*.cmake' -')' -and -not -name '*.h.cmake' |
+  xargs rm
+
 if [ -n "$MAKE_DEB" ]; then
   rm -rf /build/packages/root
   # NOTE: this is for backward compatibility with previous releases,
@ -177,11 +188,12 @@ then
     tar c -C /build/ --exclude='.git/modules/**' .git | tar x -C "$PERF_OUTPUT"/ch
     # Create branch pr and origin/master to have them for the following performance comparison
     git -C "$PERF_OUTPUT"/ch branch pr
-    git -C "$PERF_OUTPUT"/ch fetch --no-tags --depth 50 origin master:origin/master
+    git -C "$PERF_OUTPUT"/ch fetch --no-tags --no-recurse-submodules --depth 50 origin master:origin/master
     # Clean remote, to not have it stale
     git -C "$PERF_OUTPUT"/ch remote | xargs -n1 git -C "$PERF_OUTPUT"/ch remote remove
     # And clean all tags
-    git -C "$PERF_OUTPUT"/ch tag | xargs git -C "$PERF_OUTPUT"/ch tag -d
+    echo "Deleting $(git -C "$PERF_OUTPUT"/ch tag | wc -l) tags"
+    git -C "$PERF_OUTPUT"/ch tag | xargs git -C "$PERF_OUTPUT"/ch tag -d >/dev/null
     git -C "$PERF_OUTPUT"/ch reset --soft pr
     git -C "$PERF_OUTPUT"/ch log -5
     (
@ -236,16 +236,14 @@ def parse_env_variables(
         cc = compiler
         result.append("DEB_ARCH=amd64")

-    cxx = cc.replace("gcc", "g++").replace("clang", "clang++")
+    cxx = cc.replace("clang", "clang++")

     if package_type == "deb":
-        # NOTE: This are the env for packages/build script
+        # NOTE: This is the env for packages/build script
         result.append("MAKE_DEB=true")
         cmake_flags.append("-DENABLE_TESTS=0")
         cmake_flags.append("-DENABLE_UTILS=0")
-        cmake_flags.append("-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON")
         cmake_flags.append("-DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON")
-        cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON")
         cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
         cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
         cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
@ -265,12 +263,7 @@ def parse_env_variables(
     elif package_type == "fuzzers":
         cmake_flags.append("-DENABLE_FUZZING=1")
         cmake_flags.append("-DENABLE_PROTOBUF=1")
-        cmake_flags.append("-DUSE_INTERNAL_PROTOBUF_LIBRARY=1")
         cmake_flags.append("-DWITH_COVERAGE=1")
-        cmake_flags.append("-DCMAKE_AUTOGEN_VERBOSE=ON")
-        # cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
-        # cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
-        # cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
         # Reduce linking and building time by avoid *install/all dependencies
         cmake_flags.append("-DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON")
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.10.3.5"
+ARG VERSION="23.10.4.25"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.10.3.5"
+ARG VERSION="23.10.4.25"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@ -206,7 +206,7 @@ function build
 (
     cd "$FASTTEST_BUILD"
     TIMEFORMAT=$'\nreal\t%3R\nuser\t%3U\nsys\t%3S'
-    ( time ninja clickhouse-bundle) |& ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt"
+    ( time ninja clickhouse-bundle clickhouse-stripped) |& ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/build_log.txt"
     BUILD_SECONDS_ELAPSED=$(awk '/^....-..-.. ..:..:.. real\t[0-9]/ {print $4}' < "$FASTTEST_OUTPUT/build_log.txt")
     echo "build_clickhouse_fasttest_binary: [ OK ] $BUILD_SECONDS_ELAPSED sec." \
         | ts '%Y-%m-%d %H:%M:%S' \
@ -215,7 +215,6 @@ function build
         mkdir -p "$FASTTEST_OUTPUT/binaries/"
         cp programs/clickhouse "$FASTTEST_OUTPUT/binaries/clickhouse"

-        strip programs/clickhouse -o programs/clickhouse-stripped
         zstd --threads=0 programs/clickhouse-stripped -o "$FASTTEST_OUTPUT/binaries/clickhouse-stripped.zst"
     fi
     ccache_status
@ -39,8 +39,7 @@ public class MySQLJavaClientTest {

         // useServerPrepStmts=true -> COM_STMT_PREPARE + COM_STMT_EXECUTE -> binary
         // useServerPrepStmts=false -> COM_QUERY -> text
-        String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&useServerPrepStmts=%s",
-                host, port, database, binary);
+        String jdbcUrl = String.format("jdbc:mysql://%s:%s/%s?useSSL=false&useServerPrepStmts=%s", host, port, database, binary);

         try {
             Class.forName("com.mysql.cj.jdbc.Driver");
@ -67,21 +66,21 @@ public class MySQLJavaClientTest {
        int rowNum = 1;
        while (rs.next()) {
            System.out.printf("Row #%d\n", rowNum++);
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "i8"), rs.getInt("i8"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "i16"), rs.getInt("i16"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "i32"), rs.getInt("i32"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "i64"), rs.getLong("i64"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "i128"), rs.getString("i128"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "i256"), rs.getString("i256"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui8"), rs.getInt("ui8"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui16"), rs.getInt("ui16"));
-           System.out.printf("%s, value: %d\n", getMysqlType(rs, "ui32"), rs.getLong("ui32"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui64"), rs.getString("ui64"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui128"), rs.getString("ui128"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "ui256"), rs.getString("ui256"));
-           System.out.printf("%s, value: %f\n", getMysqlType(rs, "f32"), rs.getFloat("f32"));
-           System.out.printf("%s, value: %f\n", getMysqlType(rs, "f64"), rs.getFloat("f64"));
-           System.out.printf("%s, value: %b\n", getMysqlType(rs, "b"), rs.getBoolean("b"));
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i8"), rs.getInt("i8"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i16"), rs.getInt("i16"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i32"), rs.getInt("i32"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "i64"), rs.getLong("i64"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "i128"), rs.getString("i128"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "i256"), rs.getString("i256"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui8"), rs.getInt("ui8"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui16"), rs.getInt("ui16"), rs.wasNull());
+           System.out.printf("%s, value: %d, wasNull: %b\n", getMysqlType(rs, "ui32"), rs.getLong("ui32"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui64"), rs.getString("ui64"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui128"), rs.getString("ui128"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ui256"), rs.getString("ui256"), rs.wasNull());
+           System.out.printf("%s, value: %f, wasNull: %b\n", getMysqlType(rs, "f32"), rs.getFloat("f32"), rs.wasNull());
+           System.out.printf("%s, value: %f, wasNull: %b\n", getMysqlType(rs, "f64"), rs.getFloat("f64"), rs.wasNull());
+           System.out.printf("%s, value: %b, wasNull: %b\n", getMysqlType(rs, "b"), rs.getBoolean("b"), rs.wasNull());
        }
        System.out.println();
@ -92,10 +91,10 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "s"), rs.getString("s"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "sn"), rs.getString("sn"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "lc"), rs.getString("lc"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "nlc"), rs.getString("nlc"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "s"), rs.getString("s"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "sn"), rs.getString("sn"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "lc"), rs.getString("lc"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "nlc"), rs.getString("nlc"), rs.wasNull());
@ -106,10 +105,10 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "ilc"), rs.getInt("ilc"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dlc"), rs.getDate("dlc"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ilc"), rs.getInt("ilc"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dlc"), rs.getDate("dlc"), rs.wasNull());
            // NULL int is represented as zero
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "ni"), rs.getInt("ni"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "ni"), rs.getInt("ni"), rs.wasNull());
@ -120,12 +119,11 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d32"), rs.getBigDecimal("d32").toPlainString());
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d64"), rs.getBigDecimal("d64").toPlainString());
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d128_native"),
-                   rs.getBigDecimal("d128_native").toPlainString());
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d128_text"), rs.getString("d128_text"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d256"), rs.getString("d256"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d32"), rs.getBigDecimal("d32").toPlainString(), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d64"), rs.getBigDecimal("d64").toPlainString(), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d128_native"), rs.getBigDecimal("d128_native").toPlainString(), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d128_text"), rs.getString("d128_text"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d256"), rs.getString("d256"), rs.wasNull());
@ -136,12 +134,12 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d"), rs.getDate("d"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d32"), rs.getDate("d32"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_6"), rs.getTimestamp("dt64_6"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_9"), rs.getTimestamp("dt64_9"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d"), rs.getDate("d"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d32"), rs.getDate("d32"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_6"), rs.getTimestamp("dt64_6"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_9"), rs.getTimestamp("dt64_9"), rs.wasNull());
@ -152,13 +150,13 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_0"), rs.getTimestamp("dt64_0"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_1"), rs.getTimestamp("dt64_1"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_2"), rs.getTimestamp("dt64_2"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_4"), rs.getTimestamp("dt64_4"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_5"), rs.getTimestamp("dt64_5"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_7"), rs.getTimestamp("dt64_7"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_8"), rs.getTimestamp("dt64_8"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_0"), rs.getTimestamp("dt64_0"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_1"), rs.getTimestamp("dt64_1"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_2"), rs.getTimestamp("dt64_2"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_4"), rs.getTimestamp("dt64_4"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_5"), rs.getTimestamp("dt64_5"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_7"), rs.getTimestamp("dt64_7"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_8"), rs.getTimestamp("dt64_8"), rs.wasNull());
@ -169,8 +167,8 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt64_3"), rs.getTimestamp("dt64_3"), rs.wasNull());
@ -181,10 +179,10 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "a"), rs.getString("a"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "u"), rs.getString("u"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "t"), rs.getString("t"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "m"), rs.getString("m"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "a"), rs.getString("a"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "u"), rs.getString("u"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "t"), rs.getString("t"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "m"), rs.getString("m"), rs.wasNull());
@ -196,17 +194,15 @@ public class MySQLJavaClientTest {
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "f"), rs.getFloat("f"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "d"), rs.getDate("d"));
-           System.out.printf("%s, value: %s\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"));
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "f"), rs.getFloat("f"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "d"), rs.getDate("d"), rs.wasNull());
+           System.out.printf("%s, value: %s, wasNull: %b\n", getMysqlType(rs, "dt"), rs.getTimestamp("dt"), rs.wasNull());
        }
        System.out.println();
    }

    private static String getMysqlType(ResultSet rs, String columnLabel) throws SQLException {
        ResultSetMetaData meta = rs.getMetaData();
-       return String.format("%s type is %s", columnLabel,
-               MysqlType.getByJdbcType(meta.getColumnType(rs.findColumn(columnLabel))));
+       return String.format("%s type is %s", columnLabel, MysqlType.getByJdbcType(meta.getColumnType(rs.findColumn(columnLabel))));
    }

}
@ -68,6 +68,7 @@ RUN python3 -m pip install --no-cache-dir \
     asyncio \
     avro==1.10.2 \
     azure-storage-blob \
+    boto3 \
     cassandra-driver \
     confluent-kafka==1.9.2 \
     delta-spark==2.3.0 \
@ -189,6 +189,8 @@ function run_tests
         test_prefix=right/performance
     fi

+    run_only_changed_tests=0
+
     # Determine which tests to run.
     if [ -v CHPC_TEST_GREP ]
     then
@ -203,6 +205,7 @@ function run_tests
         # tests. The lists of changed files are prepared in entrypoint.sh because
         # it has the repository.
         test_files=($(sed "s/tests\/performance/${test_prefix//\//\\/}/" changed-test-definitions.txt))
+        run_only_changed_tests=1
     else
         # The default -- run all tests found in the test dir.
         test_files=($(ls "$test_prefix"/*.xml))
@ -226,6 +229,13 @@ function run_tests
         test_files=("${test_files[@]}")
     fi

+    if [ "$run_only_changed_tests" -ne 0 ]; then
+        if [ ${#test_files[@]} -eq 0 ]; then
+            time "$script_dir/report.py" --no-tests-run > report.html
+            exit 0
+        fi
+    fi
+
     # For PRs w/o changes in test definitons, test only a subset of queries,
     # and run them less times. If the corresponding environment variables are
     # already set, keep those values.
@ -34,9 +34,4 @@
         <memory_profiler_step>0</memory_profiler_step>
     </default>
 </profiles>
-<users>
-    <default>
-        <access_management>1</access_management>
-    </default>
-</users>
 </clickhouse>
@ -19,6 +19,7 @@ parser.add_argument(
     choices=["main", "all-queries"],
     help="Which report to build",
 )
+parser.add_argument("--no-tests-run", action="store_true", default=False)
 args = parser.parse_args()

 tables = []
@ -354,6 +355,36 @@ if args.report == "main":

     add_tested_commits()

+    def print_status(status, message):
+        print(
+            (
+                """
+    <!--status: {status}-->
+    <!--message: {message}-->
+    """.format(
+                    status=status, message=message
+                )
+            )
+        )
+
+    if args.no_tests_run:
+        for t in tables:
+            print(t)
+        print(
+            "<h2>No tests to run. Only changed tests were run, but all changed tests are from another batch.</h2>"
+        )
+        print(
+            f"""
+        </div>
+        {os.getenv("CHPC_ADD_REPORT_LINKS") or ''}
+        </body>
+        </html>
+        """
+        )
+        # Why failure? Because otherwise we will not notice if we have a bug that leads to 0 tests being run
+        print_status("failure", "No tests changed, nothing to run")
+        exit(0)
+
     run_error_rows = tsvRows("run-errors.tsv")
     error_tests += len(run_error_rows)
     addSimpleTable("Run Errors", ["Test", "Error"], run_error_rows)
@ -646,16 +677,7 @@ if args.report == "main":
         status = "failure"
         message = "Errors while building the report."

-    print(
-        (
-            """
-    <!--status: {status}-->
-    <!--message: {message}-->
-    """.format(
-                status=status, message=message
-            )
-        )
-    )
+    print_status(status, message)

 elif args.report == "all-queries":
     print((header_template.format()))
@ -53,7 +53,7 @@ function configure()
         > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
     sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

-    function randomize_keeper_config_boolean_value {
+    function randomize_config_boolean_value {
         value=$(($RANDOM % 2))
         sudo cat /etc/clickhouse-server/config.d/$2.xml \
             | sed "s|<$1>[01]</$1>|<$1>$value</$1>|" \
@ -72,7 +72,11 @@ function configure()
     sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
     sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml

-    randomize_config_boolean_value use_compression zookeeper
+    if [[ -n "$ZOOKEEPER_FAULT_INJECTION" ]] && [[ "$ZOOKEEPER_FAULT_INJECTION" -eq 1 ]]; then
+        randomize_config_boolean_value use_compression zookeeper_fault_injection
+    else
+        randomize_config_boolean_value use_compression zookeeper
+    fi

     # for clickhouse-server (via service)
     echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
@ -20,9 +20,9 @@
     fi
 elif [ "${ARCH}" = "aarch64" -o "${ARCH}" = "arm64" ]
 then
-    # If the system has >=ARMv8.2 (https://en.wikipedia.org/wiki/AArch64), choose the corresponding build, else fall back to a v8.0
-    # compat build. Unfortunately, the ARM ISA level cannot be read directly, we need to guess from the "features" in /proc/cpuinfo.
-    # Also, the flags in /proc/cpuinfo are named differently than the flags passed to the compiler (cmake/cpu_features.cmake).
+    # Dispatch between standard and compatibility builds, see cmake/cpu_features.cmake for details. Unfortunately, (1) the ARM ISA level
+    # cannot be read directly, we need to guess from the "features" in /proc/cpuinfo, and (2) the flags in /proc/cpuinfo are named
+    # differently than the flags passed to the compiler in cpu_features.cmake.
     HAS_ARMV82=$(grep -m 1 'Features' /proc/cpuinfo | awk '/asimd/ && /sha1/ && /aes/ && /atomics/ && /lrcpc/')
     if [ "${HAS_ARMV82}" ]
     then
docs/changelogs/v23.10.4.25-stable.md (new file, 28 lines)
@ -0,0 +1,28 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.10.4.25-stable (330fd687d41) FIXME as compared to v23.10.3.5-stable (b2ba7637a41)

#### Build/Testing/Packaging Improvement
* Backported in [#56633](https://github.com/ClickHouse/ClickHouse/issues/56633): In [#54043](https://github.com/ClickHouse/ClickHouse/issues/54043) the setup plan started to appear in the logs. It should be only in the `runner_get_all_tests.log` only. As well, send the failed infrastructure event to CI db. [#56214](https://github.com/ClickHouse/ClickHouse/pull/56214) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#56737](https://github.com/ClickHouse/ClickHouse/issues/56737): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix restore from backup with `flatten_nested` and `data_type_default_nullable` [#56306](https://github.com/ClickHouse/ClickHouse/pull/56306) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash in GCD codec in case when zeros present in data [#56704](https://github.com/ClickHouse/ClickHouse/pull/56704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)).

docs/changelogs/v23.3.17.13-lts.md (new file, 23 lines)
@ -0,0 +1,23 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.17.13-lts (e867d59020f) FIXME as compared to v23.3.16.7-lts (fb4125cc92a)

#### Build/Testing/Packaging Improvement
* Backported in [#56731](https://github.com/ClickHouse/ClickHouse/issues/56731): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)).

docs/changelogs/v23.8.7.24-lts.md (new file, 31 lines)
@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.8.7.24-lts (812b95e14ba) FIXME as compared to v23.8.6.16-lts (077df679bed)

#### Build/Testing/Packaging Improvement
* Backported in [#56733](https://github.com/ClickHouse/ClickHouse/issues/56733): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NO CL CATEGORY
* Backported in [#56601](https://github.com/ClickHouse/ClickHouse/issues/56601):. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)).

docs/changelogs/v23.9.5.29-stable.md (new file, 34 lines)
@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.9.5.29-stable (f8554c1a1ff) FIXME as compared to v23.9.4.11-stable (74c1f49dd6a)

#### Build/Testing/Packaging Improvement
* Backported in [#56631](https://github.com/ClickHouse/ClickHouse/issues/56631): In [#54043](https://github.com/ClickHouse/ClickHouse/issues/54043) the setup plan started to appear in the logs. It should be only in the `runner_get_all_tests.log` only. As well, send the failed infrastructure event to CI db. [#56214](https://github.com/ClickHouse/ClickHouse/pull/56214) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#56735](https://github.com/ClickHouse/ClickHouse/issues/56735): Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Select from system tables when table based on table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash in GCD codec in case when zeros present in data [#56704](https://github.com/ClickHouse/ClickHouse/pull/56704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### NO CL CATEGORY
* Backported in [#56603](https://github.com/ClickHouse/ClickHouse/issues/56603):. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT
* Improve enrich image [#55793](https://github.com/ClickHouse/ClickHouse/pull/55793) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)).
@ -238,19 +238,19 @@ Example:

## Virtual Columns {#virtual-columns}

- `_topic` — Kafka topic. Data type: `LowCardinality(String)`.
- `_key` — Key of the message. Data type: `String`.
- `_offset` — Offset of the message. Data type: `UInt64`.
- `_timestamp` — Timestamp of the message. Data type: `Nullable(DateTime)`.
- `_timestamp_ms` — Timestamp in milliseconds of the message. Data type: `Nullable(DateTime64(3))`.
- `_partition` — Partition of Kafka topic. Data type: `UInt64`.
- `_headers.name` — Array of message's headers keys. Data type: `Array(String)`.
- `_headers.value` — Array of message's headers values. Data type: `Array(String)`.

Additional virtual columns when `kafka_handle_error_mode='stream'`:

- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `String`.
- `_error` - Exception message that occurred during failed parsing. Data type: `String`.

Note: `_raw_message` and `_error` virtual columns are filled only in case of an exception during parsing; they are always empty when the message was parsed successfully.
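A minimal sketch of reading a few of these virtual columns, assuming a Kafka-engine table named `kafka_queue` and that direct SELECTs from it are permitted:

``` sql
SELECT
    _topic,
    _partition,
    _offset,
    _key,
    _timestamp
FROM kafka_queue
LIMIT 5;
```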
@ -163,14 +163,14 @@ If you want to change the target table by using `ALTER`, we recommend disabling

## Virtual Columns {#virtual-columns}

- `_subject` - NATS message subject. Data type: `String`.

Additional virtual columns when `kafka_handle_error_mode='stream'`:

- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message that occurred during failed parsing. Data type: `Nullable(String)`.

Note: `_raw_message` and `_error` virtual columns are filled only in case of an exception during parsing; they are always `NULL` when the message was parsed successfully.

## Data formats support {#data-formats-support}
@ -184,19 +184,19 @@ Example:

## Virtual Columns {#virtual-columns}

- `_exchange_name` - RabbitMQ exchange name. Data type: `String`.
- `_channel_id` - ChannelID on which the consumer that received the message was declared. Data type: `String`.
- `_delivery_tag` - DeliveryTag of the received message. Scoped per channel. Data type: `UInt64`.
- `_redelivered` - `redelivered` flag of the message. Data type: `UInt8`.
- `_message_id` - messageID of the received message; non-empty if it was set when the message was published. Data type: `String`.
- `_timestamp` - timestamp of the received message; non-empty if it was set when the message was published. Data type: `UInt64`.

Additional virtual columns when `kafka_handle_error_mode='stream'`:

- `_raw_message` - Raw message that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message that occurred during failed parsing. Data type: `Nullable(String)`.

Note: `_raw_message` and `_error` virtual columns are filled only in case of an exception during parsing; they are always `NULL` when the message was parsed successfully.

## Data formats support {#data-formats-support}
@ -14,7 +14,7 @@ You should never use too granular of partitioning. Don't partition your data by

Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well.

A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition.

The partition is specified in the `PARTITION BY expr` clause when [creating a table](../../../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-creating-a-table). The partition key can be any expression from the table columns. For example, to specify partitioning by month, use the expression `toYYYYMM(date_column)`:
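A minimal sketch, with assumed table and column names:

``` sql
CREATE TABLE visits
(
    VisitDate Date,
    CounterID UInt32,
    UserID UInt64
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(VisitDate)  -- one partition per calendar month
ORDER BY (CounterID, VisitDate);
```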
@ -6,7 +6,7 @@ sidebar_label: MergeTree

# MergeTree

The `MergeTree` engine and other engines of this family (`*MergeTree`) are the most commonly used and most robust ClickHouse table engines.

Engines in the `MergeTree` family are designed for inserting a very large amount of data into a table. The data is quickly written to the table part by part, then rules are applied for merging the parts in the background. This method is much more efficient than continually rewriting the data in storage during insert.
@ -32,6 +32,8 @@ Main features:

The [Merge](/docs/en/engines/table-engines/special/merge.md/#merge) engine does not belong to the `*MergeTree` family.
:::

If you need to update rows frequently, we recommend using the [`ReplacingMergeTree`](/docs/en/engines/table-engines/mergetree-family/replacingmergetree.md) table engine. Using `ALTER TABLE my_table UPDATE` to update rows triggers a mutation, which causes parts to be re-written and uses IO/resources. With `ReplacingMergeTree`, you can simply insert the updated rows and the old rows will be replaced according to the table sorting key.
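For illustration, a minimal sketch of this pattern with assumed table and column names:

``` sql
CREATE TABLE my_table
(
    key UInt64,
    value String,
    updated_at DateTime
)
ENGINE = ReplacingMergeTree(updated_at)
ORDER BY key;

-- Instead of ALTER TABLE ... UPDATE, insert the new version of the row.
INSERT INTO my_table VALUES (1, 'old', now());
INSERT INTO my_table VALUES (1, 'new', now());

-- FINAL collapses duplicates by the sorting key at query time.
SELECT * FROM my_table FINAL;
```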
## Creating a Table {#table_engine-mergetree-creating-a-table}

``` sql
@ -94,12 +94,12 @@ If you want to change the target table by using `ALTER`, we recommend disabling

## Virtual Columns {#virtual-columns}

- `_filename` - Name of the log file. Data type: `LowCardinality(String)`.
- `_offset` - Offset in the log file. Data type: `UInt64`.

Additional virtual columns when `kafka_handle_error_mode='stream'`:

- `_raw_record` - Raw record that couldn't be parsed successfully. Data type: `Nullable(String)`.
- `_error` - Exception message that occurred during failed parsing. Data type: `Nullable(String)`.

Note: `_raw_record` and `_error` virtual columns are filled only in case of an exception during parsing; they are always `NULL` when the record was parsed successfully.
@ -2469,6 +2469,7 @@ This function is designed to load a NumPy array from a .npy file into ClickHouse

| u2 | UInt16 |
| u4 | UInt32 |
| u8 | UInt64 |
| f2 | Float32 |
| f4 | Float32 |
| f8 | Float64 |
| S  | String  |
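As an illustrative sketch (the `Npy` format name, file path, and column structure here are assumptions, not part of the original page), such a file could be queried through the `file()` table function:

``` sql
SELECT * FROM file('data.npy', 'Npy', 'value Int64') LIMIT 10;
```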
@ -17,12 +17,8 @@

- The issue may happen when the GPG key is changed.

Please use the manual from the [setup](../getting-started/install.md#setup-the-debian-repository) page to update the repository configuration.

### You Get Different Warnings with `apt-get update` {#you-get-different-warnings-with-apt-get-update}
@ -169,7 +169,12 @@ Also, results of queries with non-deterministic functions are not cached by default

[`getMacro()`](../sql-reference/functions/other-functions.md#getMacro) etc.

To force caching of results of queries with non-deterministic functions regardless, use setting
[query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling).

:::note
Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether
results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect.
:::

Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
@ -961,9 +961,13 @@ See also "[Executable User Defined Functions](../../sql-reference/functions/in

Lazy loading of dictionaries.

If `true`, then each dictionary is loaded on first use. If the loading fails, the function that was using the dictionary throws an exception.

If `false`, then the server starts loading all dictionaries at startup.
Dictionaries are loaded in the background.
The server doesn't wait at startup until all the dictionaries finish loading
(exception: if `wait_dictionaries_load_at_startup` is set to `true` - see below).
When a dictionary is used in a query for the first time, the query waits until the dictionary is loaded if it's not loaded yet.

The default is `true`.
|
|||||||
<users_config>users.xml</users_config>
|
<users_config>users.xml</users_config>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup}
|
||||||
|
|
||||||
|
If `false`, then the server will not wait at startup until all the dictionaries finish their loading.
|
||||||
|
This allows to start ClickHouse faster.
|
||||||
|
|
||||||
|
If `true`, then the server will wait at startup until all the dictionaries finish their loading (successfully or not)
|
||||||
|
before listening to any connections.
|
||||||
|
This can make ClickHouse start slowly, however after that some queries can be executed faster
|
||||||
|
(because they won't have to wait for the used dictionaries to be load).
|
||||||
|
|
||||||
|
The default is `false`.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<wait_dictionaries_load_at_startup>false</wait_dictionaries_load_at_startup>
|
||||||
|
```
|
||||||
|
|
||||||
## zookeeper {#server-settings_zookeeper}
|
## zookeeper {#server-settings_zookeeper}
|
||||||
|
|
||||||
Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster.
|
Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster.
|
||||||
|
@ -731,11 +731,13 @@ Default value: LZ4.

## max_block_size {#setting-max_block_size}

In ClickHouse, data is processed by blocks, which are sets of column parts. The internal processing cycles for a single block are efficient, but there are noticeable costs when processing each block.

The `max_block_size` setting indicates the recommended maximum number of rows to include in a single block when loading data from tables. Blocks the size of `max_block_size` are not always loaded from the table: if ClickHouse determines that less data needs to be retrieved, a smaller block is processed.

The block size should not be too small, so that the per-block costs remain negligible. It should also not be too large, so that queries with a LIMIT clause that complete after the first block are processed quickly. When setting `max_block_size`, the goal should be to avoid consuming too much memory when extracting a large number of columns in multiple threads and to preserve at least some cache locality.

Default value: `65,409`
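An illustrative per-query override (a sketch only; the value shown is not a recommendation):

``` sql
SELECT count()
FROM numbers(10000000)
SETTINGS max_block_size = 8192;
```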
## preferred_block_size_bytes {#preferred-block-size-bytes}
@ -1657,16 +1659,17 @@ Possible values:

Default value: `1`.

## query_cache_nondeterministic_function_handling {#query-cache-nondeterministic-function-handling}

Controls how the [query cache](../query-cache.md) handles `SELECT` queries with non-deterministic functions like `rand()` or `now()`.

Possible values:

- `'throw'` - Throw an exception and don't cache the query result.
- `'save'` - Cache the query result.
- `'ignore'` - Don't cache the query result and don't throw an exception.

Default value: `throw`.
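A sketch of how the setting might be applied per query, assuming the query cache is enabled for that query:

``` sql
SELECT now()
SETTINGS use_query_cache = true,
         query_cache_nondeterministic_function_handling = 'save';
```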
## query_cache_min_query_runs {#query-cache-min-query-runs}
|
|||||||
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed)
|
- [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed)
|
||||||
- [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed)
|
- [Managing Distributed Tables](../../sql-reference/statements/system.md/#query-language-system-distributed)
|
||||||
|
|
||||||
|
## insert_distributed_sync {#insert_distributed_sync}
|
||||||
|
|
||||||
|
Alias for [`distributed_foreground_insert`](#distributed_foreground_insert).
|
||||||
|
|
||||||
## insert_shard_id {#insert_shard_id}
|
## insert_shard_id {#insert_shard_id}
|
||||||
|
|
||||||
If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously.
|
If not `0`, specifies the shard of [Distributed](../../engines/table-engines/special/distributed.md/#distributed) table into which the data will be inserted synchronously.
|
||||||
@ -4819,3 +4826,10 @@ When set to `true` the metadata files are written with `VERSION_FULL_OBJECT_KEY`

When set to `false` the metadata files are written with the previous format version, `VERSION_INLINE_DATA`. With that format, only suffixes of object storage key names are written to the metadata files. The prefix for all object storage key names is set in configuration files in the `storage_configuration.disks` section.

Default value: `false`.

## s3_use_adaptive_timeouts {#s3_use_adaptive_timeouts}

When set to `true`, the first two attempts of every S3 request are made with low send and receive timeouts.
When set to `false`, all attempts are made with identical timeouts.

Default value: `true`.
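A sketch of overriding the behaviour for a single query; the bucket URL is a placeholder and treating this as a per-query setting is an assumption:

``` sql
SELECT count()
FROM s3('https://my-bucket.s3.amazonaws.com/data/*.csv', 'CSVWithNames')
SETTINGS s3_use_adaptive_timeouts = 0;
```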
50 docs/en/operations/utilities/backupview.md Normal file
@ -0,0 +1,50 @@

---
slug: /en/operations/utilities/backupview
title: clickhouse_backupview
---

# clickhouse_backupview {#clickhouse_backupview}

Python module to help analyze backups made by the [BACKUP](https://clickhouse.com/docs/en/operations/backup) command.
The main motivation was to allow getting some information from a backup without actually restoring it.

This module provides functions to
- enumerate files contained in a backup
- read files from a backup
- get useful information in readable form about databases, tables, and parts contained in a backup
- check the integrity of a backup

## Example:

```python
from clickhouse_backupview import open_backup, S3, FileInfo

# Open a backup. We could also use a local path:
# backup = open_backup("/backups/my_backup_1/")
backup = open_backup(S3("uri", "access_key_id", "secret_access_key"))

# Get a list of databases inside the backup.
print(backup.get_databases())

# Get a list of tables inside the backup,
# and for each table its create query and a list of parts and partitions.
for db in backup.get_databases():
    for tbl in backup.get_tables(database=db):
        print(backup.get_create_query(database=db, table=tbl))
        print(backup.get_partitions(database=db, table=tbl))
        print(backup.get_parts(database=db, table=tbl))

# Extract everything from the backup.
backup.extract_all(table="mydb.mytable", out='/tmp/my_backup_1/all/')

# Extract the data of a specific table.
backup.extract_table_data(table="mydb.mytable", out='/tmp/my_backup_1/mytable/')

# Extract a single partition.
backup.extract_table_data(table="mydb.mytable", partition="202201", out='/tmp/my_backup_1/202201/')

# Extract a single part.
backup.extract_table_data(table="mydb.mytable", part="202201_100_200_3", out='/tmp/my_backup_1/202201_100_200_3/')
```

For more examples see the [test](https://github.com/ClickHouse/ClickHouse/blob/master/utils/backupview/test/test.py).
@ -16,3 +16,4 @@ pagination_next: 'en/operations/utilities/clickhouse-copier'

- [clickhouse-disks](../../operations/utilities/clickhouse-disks.md) -- Provides filesystem-like operations
  on files among different ClickHouse disks.
- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — A proxy server for ODBC driver.
- [clickhouse_backupview](../../operations/utilities/backupview.md) — A Python module to analyze ClickHouse backups.
@ -1381,7 +1381,7 @@ toStartOfFifteenMinutes(toDateTime('2023-04-21 10:20:00')): 2023-04-21 10:15:00

toStartOfFifteenMinutes(toDateTime('2023-04-21 10:23:00')): 2023-04-21 10:15:00
```

## toStartOfInterval(date_or_date_with_time, INTERVAL x unit \[, time_zone\])

This function generalizes other `toStartOf*()` functions. For example,
- `toStartOfInterval(t, INTERVAL 1 year)` returns the same as `toStartOfYear(t)`,
@ -6,9 +6,9 @@ sidebar_label: Random Numbers

# Functions for Generating Random Numbers

All functions in this section accept zero or one arguments. The only use of the argument (if provided) is to prevent [common subexpression
elimination](../../sql-reference/functions/index.md#common-subexpression-elimination) such that two different executions within a row of the same random
function return different random values.
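An illustrative sketch of the difference (exact values will vary from run to run):

``` sql
SELECT rand(), rand();    -- identical expressions may be collapsed into a single value per row
SELECT rand(1), rand(2);  -- distinct dummy arguments prevent common subexpression elimination
```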
Related content
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)
@ -429,7 +429,7 @@ SELECT format('{} {}', 'Hello', 'World')

## concat

Concatenates the given arguments.

**Syntax**
@ -439,7 +439,9 @@ concat(s1, s2, ...)

**Arguments**

At least two values of arbitrary type.

Arguments which are not of types [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md) are converted to strings using their default serialization. As this decreases performance, it is not recommended to use non-String/FixedString arguments.

**Returned values**
@ -449,6 +451,8 @@ If any of arguments is `NULL`, the function returns `NULL`.

**Example**

Query:

``` sql
SELECT concat('Hello, ', 'World!');
```
@ -461,6 +465,20 @@ Result:

└─────────────────────────────┘
```

Query:

```sql
SELECT concat(42, 144);
```

Result:

```result
┌─concat(42, 144)─┐
│ 42144           │
└─────────────────┘
```

## concatAssumeInjective

Like [concat](#concat) but assumes that `concat(s1, s2, ...) → sn` is injective. Can be used for optimization of GROUP BY.
@ -526,6 +544,8 @@ Concatenates the given strings with a given separator.

concatWithSeparator(sep, expr1, expr2, expr3...)
```

Alias: `concat_ws`

**Arguments**

- sep — separator. Const [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
@ -5,7 +5,7 @@ sidebar_label: OPTIMIZE

title: "OPTIMIZE Statement"
---

This query tries to initialize an unscheduled merge of data parts for tables. Note that we generally recommend against using `OPTIMIZE TABLE ... FINAL` (see these [docs](/docs/en/optimize/avoidoptimizefinal)) as its use case is meant for administration, not for daily operations.
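A minimal sketch of the intended, non-`FINAL` use; the table name and the `PARTITION` value (assuming a `toYYYYMM` partition key) are placeholders:

``` sql
-- Trigger an unscheduled merge for a table, or for one of its partitions.
OPTIMIZE TABLE my_table;
OPTIMIZE TABLE my_table PARTITION 202311;
```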
:::note
`OPTIMIZE` can’t fix the `Too many parts` error.
@ -1,4 +1,4 @@

---
slug: /en/sql-reference/table-functions/file
sidebar_position: 60
sidebar_label: file
|
|||||||
|
|
||||||
# file
|
# file
|
||||||
|
|
||||||
Provides a table-like interface to SELECT from and INSERT to files. This table function is similar to the [s3](/docs/en/sql-reference/table-functions/url.md) table function. Use file() when working with local files, and s3() when working with buckets in S3, GCS, or MinIO.
|
A table engine which provides a table-like interface to SELECT from and INSERT into files, similar to the [s3](/docs/en/sql-reference/table-functions/url.md) table function. Use `file()` when working with local files, and `s3()` when working with buckets in object storage such as S3, GCS, or MinIO.
|
||||||
|
|
||||||
The `file` function can be used in `SELECT` and `INSERT` queries to read from or write to files.
|
The `file` function can be used in `SELECT` and `INSERT` queries to read from or write to files.
|
||||||
|
|
||||||
@ -18,18 +18,18 @@ file([path_to_archive ::] path [,format] [,structure] [,compression])
|
|||||||
|
|
||||||
**Parameters**
|
**Parameters**
|
||||||
|
|
||||||
- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Path to file support following globs in read-only mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc', 'def'` — strings.
|
- `path` — The relative path to the file from [user_files_path](/docs/en/operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports in read-only mode the following [globs](#globs_in_path): `*`, `?`, `{abc,def}` (with `'abc'` and `'def'` being strings) and `{N..M}` (with `N` and `M` being numbers).
|
||||||
- `path_to_archive` - The relative path to zip/tar/7z archive. Path to archive support the same globs as `path`.
|
- `path_to_archive` - The relative path to a zip/tar/7z archive. Supports the same globs as `path`.
|
||||||
- `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file.
|
- `format` — The [format](/docs/en/interfaces/formats.md#formats) of the file.
|
||||||
- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
|
- `structure` — Structure of the table. Format: `'column1_name column1_type, column2_name column2_type, ...'`.
|
||||||
- `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. The supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`.
|
- `compression` — The existing compression type when used in a `SELECT` query, or the desired compression type when used in an `INSERT` query. Supported compression types are `gz`, `br`, `xz`, `zst`, `lz4`, and `bz2`.
|
||||||
|
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
A table with the specified structure for reading or writing data in the specified file.
|
A table for reading or writing data in a file.
|
||||||
|
|
||||||
## File Write Examples
|
## Examples for Writing to a File
|
||||||
|
|
||||||
### Write to a TSV file
|
### Write to a TSV file
|
||||||
|
|
||||||
@ -48,9 +48,9 @@ As a result, the data is written into the file `test.tsv`:
|
|||||||
1 3 2
|
1 3 2
|
||||||
```
|
```
|
||||||
|
|
||||||
### Partitioned Write to multiple TSV files
|
### Partitioned write to multiple TSV files
|
||||||
|
|
||||||
If you specify `PARTITION BY` expression when inserting data into a file() function, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency.
|
If you specify a `PARTITION BY` expression when inserting data into a table function of type `file()`, then a separate file is created for each partition. Splitting the data into separate files helps to improve performance of read operations.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO TABLE FUNCTION
|
INSERT INTO TABLE FUNCTION
|
||||||
@ -72,11 +72,11 @@ As a result, the data is written into three files: `test_1.tsv`, `test_2.tsv`, a

1 2 3
```

## Examples for Reading from a File

### SELECT from a CSV file

First, set `user_files_path` in the server configuration and prepare a file `test.csv`:

``` bash
$ grep user_files_path /etc/clickhouse-server/config.xml
@ -88,7 +88,7 @@ $ cat /var/lib/clickhouse/user_files/test.csv

78,43,45
```

Then, read data from `test.csv` into a table and select its first two rows:

``` sql
SELECT * FROM
@ -103,14 +103,6 @@ LIMIT 2;

└─────────┴─────────┴─────────┘
```

### Inserting data from a file into a table:

``` sql
@ -130,41 +122,42 @@ file('test.csv', 'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32');

└─────────┴─────────┴─────────┘
```

Reading data from `table.csv`, located in `archive1.zip` and/or `archive2.zip`:

``` sql
SELECT * FROM file('user_files/archives/archive{1..2}.zip :: table.csv');
```

## Globbing {#globs_in_path}

Paths may use globbing. Files must match the whole path pattern, not only the suffix or prefix.

- `*` — Represents arbitrarily many characters except `/` but including the empty string.
- `?` — Represents an arbitrary single character.
- `{some_string,another_string,yet_another_one}` — Represents any of alternative strings `'some_string', 'another_string', 'yet_another_one'`. The strings may contain `/`.
- `{N..M}` — Represents any number `>= N` and `<= M`.
- `**` - Represents all files inside a folder recursively.

Constructions with `{}` are similar to the [remote](remote.md) table function.

**Example**

Suppose there are these files with the following relative paths:

- `some_dir/some_file_1`
- `some_dir/some_file_2`
- `some_dir/some_file_3`
- `another_dir/some_file_1`
- `another_dir/some_file_2`
- `another_dir/some_file_3`

Query the total number of rows in all files:

``` sql
SELECT count(*) FROM file('{some,another}_dir/some_file_{1..3}', 'TSV', 'name String, value UInt32');
```

An alternative path expression which achieves the same:

``` sql
SELECT count(*) FROM file('{some,another}_dir/*', 'TSV', 'name String, value UInt32');
@ -176,7 +169,7 @@ If your listing of files contains number ranges with leading zeros, use the cons

**Example**

Query the total number of rows in files named `file000`, `file001`, … , `file999`:

``` sql
SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32');

@ -184,7 +177,7 @@ SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String,

**Example**

Query the total number of rows from all files inside directory `big_dir/` recursively:

``` sql
SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32');

@ -192,7 +185,7 @@ SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32');

**Example**

Query the total number of rows from all files `file002` inside any folder in directory `big_dir/` recursively:

``` sql
SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32');
@ -6,7 +6,7 @@ sidebar_label: remote

# remote, remoteSecure

Table function `remote` allows accessing remote servers on-the-fly, i.e. without creating a [Distributed](../../engines/table-engines/special/distributed.md) table. Table function `remoteSecure` is the same as `remote` but over a secure connection.

Both functions can be used in `SELECT` and `INSERT` queries.
@ -21,36 +21,36 @@ remoteSecure('addresses_expr', [db.table, 'user'[, 'password'], sharding_key])

## Parameters

- `addresses_expr` — A remote server address or an expression that generates multiple addresses of remote servers. Format: `host` or `host:port`.

    The `host` can be specified as a server name, or as an IPv4 or IPv6 address. An IPv6 address must be specified in square brackets.

    The `port` is the TCP port on the remote server. If the port is omitted, it uses [tcp_port](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port) from the server config file for table function `remote` (by default, 9000) and [tcp_port_secure](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-tcp_port_secure) for table function `remoteSecure` (by default, 9440).

    For IPv6 addresses, a port is required.

    If only parameter `addresses_expr` is specified, `db` and `table` will use `system.one` by default.

    Type: [String](../../sql-reference/data-types/string.md).

- `db` — Database name. Type: [String](../../sql-reference/data-types/string.md).
- `table` — Table name. Type: [String](../../sql-reference/data-types/string.md).
- `user` — User name. If not specified, `default` is used. Type: [String](../../sql-reference/data-types/string.md).
- `password` — User password. If not specified, an empty password is used. Type: [String](../../sql-reference/data-types/string.md).
- `sharding_key` — Sharding key to support distributing data across nodes. For example: `insert into remote('127.0.0.1:9000,127.0.0.2', db, table, 'default', rand())`. Type: [UInt32](../../sql-reference/data-types/int-uint.md).

## Returned value

A table located on a remote server.

## Usage

As table functions `remote` and `remoteSecure` re-establish the connection for each request, it is recommended to use a `Distributed` table instead. Also, if hostnames are set, the names are resolved, and errors are not counted when working with various replicas. When processing a large number of queries, always create the `Distributed` table ahead of time, and do not use the `remote` table function.

The `remote` table function can be useful in the following cases:

- One-time data migration from one system to another
- Accessing a specific server for data comparison, debugging, and testing, i.e. ad-hoc connections.
- Queries between various ClickHouse clusters for research purposes.
- Infrequent distributed requests that are made manually.
- Distributed requests where the set of servers is re-defined each time.
@ -68,7 +68,7 @@ localhost

[2a02:6b8:0:1111::11]:9000
```

Multiple addresses can be comma-separated. In this case, ClickHouse will use distributed processing and send the query to all specified addresses (like shards with different data). Example:

``` text
example01-01-1,example01-02-1
@ -91,10 +91,13 @@ SELECT * FROM remote_table;
|
|||||||
```
|
```
|
||||||
|
|
||||||
### Migration of tables from one system to another:
|
### Migration of tables from one system to another:
|
||||||
|
|
||||||
This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`.
|
This example uses one table from a sample dataset. The database is `imdb`, and the table is `actors`.
|
||||||
|
|
||||||
#### On the source ClickHouse system (the system that currently hosts the data)
|
#### On the source ClickHouse system (the system that currently hosts the data)
|
||||||
|
|
||||||
- Verify the source database and table name (`imdb.actors`)
|
- Verify the source database and table name (`imdb.actors`)
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
show databases
|
show databases
|
||||||
```
|
```
|
||||||
@ -104,6 +107,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and
|
|||||||
```
|
```
|
||||||
|
|
||||||
- Get the CREATE TABLE statement from the source:
|
- Get the CREATE TABLE statement from the source:
|
||||||
|
|
||||||
```
|
```
|
||||||
select create_table_query
|
select create_table_query
|
||||||
from system.tables
|
from system.tables
|
||||||
@ -111,6 +115,7 @@ This example uses one table from a sample dataset. The database is `imdb`, and
|
|||||||
```
|
```
|
||||||
|
|
||||||
Response
|
Response
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE imdb.actors (`id` UInt32,
|
CREATE TABLE imdb.actors (`id` UInt32,
|
||||||
`first_name` String,
|
`first_name` String,
|
||||||
@ -123,11 +128,13 @@ This example uses one table from a sample dataset. The database is `imdb`, and
|
|||||||
#### On the destination ClickHouse system:
|
#### On the destination ClickHouse system:
|
||||||
|
|
||||||
- Create the destination database:
|
- Create the destination database:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE DATABASE imdb
|
CREATE DATABASE imdb
|
||||||
```
|
```
|
||||||
|
|
||||||
- Using the CREATE TABLE statement from the source, create the destination:
|
- Using the CREATE TABLE statement from the source, create the destination:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE imdb.actors (`id` UInt32,
|
CREATE TABLE imdb.actors (`id` UInt32,
|
||||||
`first_name` String,
|
`first_name` String,
|
||||||
@ -140,21 +147,23 @@ This example uses one table from a sample dataset. The database is `imdb`, and
|
|||||||
#### Back on the source deployment:
|
#### Back on the source deployment:
|
||||||
|
|
||||||
Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table.
|
Insert into the new database and table created on the remote system. You will need the host, port, username, password, destination database, and destination table.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
INSERT INTO FUNCTION
|
INSERT INTO FUNCTION
|
||||||
remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD')
|
remoteSecure('remote.clickhouse.cloud:9440', 'imdb.actors', 'USER', 'PASSWORD')
|
||||||
SELECT * from imdb.actors
|
SELECT * from imdb.actors
|
||||||
```
|
```
|
||||||
|
|
||||||
-## Globs in Addresses {#globs-in-addresses}
+## Globbing {#globs-in-addresses}

Patterns in curly brackets `{ }` are used to generate a set of shards and to specify replicas. If there are multiple pairs of curly brackets, then the direct product of the corresponding sets is generated.

The following pattern types are supported.

-- {*a*,*b*} - Any number of variants separated by a comma. The pattern is replaced with *a* in the first shard address and it is replaced with *b* in the second shard address and so on. For instance, `example0{1,2}-1` generates addresses `example01-1` and `example02-1`.
+- `{a,b,c}` - Represents any of alternative strings `a`, `b` or `c`. The pattern is replaced with `a` in the first shard address and replaced with `b` in the second shard address and so on. For instance, `example0{1,2}-1` generates addresses `example01-1` and `example02-1`.
-- {*n*..*m*} - A range of numbers. This pattern generates shard addresses with incrementing indices from *n* to *m*. `example0{1..2}-1` generates `example01-1` and `example02-1`.
+- `{N..M}` - A range of numbers. This pattern generates shard addresses with incrementing indices from `N` to (and including) `M`. For instance, `example0{1..2}-1` generates `example01-1` and `example02-1`.
-- {*0n*..*0m*} - A range of numbers with leading zeroes. This modification preserves leading zeroes in indices. The pattern `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`.
+- `{0n..0m}` - A range of numbers with leading zeroes. This pattern preserves leading zeroes in indices. For instance, `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`.
-- {*a*|*b*} - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`.
+- `{a|b}` - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`.

The query will be sent to the first healthy replica. However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#settings-load_balancing) setting.
The number of generated addresses is limited by the [table_function_remote_max_addresses](../../operations/settings/settings.md#table_function_remote_max_addresses) setting.

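To make the glob patterns above concrete, here is an illustrative sketch. The host names follow the examples in the list and do not exist; the table `default.hits` is likewise an assumption.

```sql
-- 'example0{1,2}-{1|2}' expands to two shards (example01-*, example02-*),
-- each with two replica candidates (-1, -2); the first healthy replica of
-- each shard is queried, following the load_balancing setting.
SELECT hostName(), count()
FROM remote('example0{1,2}-{1|2}', default.hits)
GROUP BY hostName();
```
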
@ -86,14 +86,14 @@ WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column]

These functions can be used only as window functions.

-`row_number()` - Number the current row within its partition starting from 1.
+- `row_number()` - Number the current row within its partition starting from 1.
-`first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
+- `first_value(x)` - Return the first non-NULL value evaluated within its ordered frame.
-`last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
+- `last_value(x)` - Return the last non-NULL value evaluated within its ordered frame.
-`nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
+- `nth_value(x, offset)` - Return the first non-NULL value evaluated against the nth row (offset) in its ordered frame.
-`rank()` - Rank the current row within its partition with gaps.
+- `rank()` - Rank the current row within its partition with gaps.
-`dense_rank()` - Rank the current row within its partition without gaps.
+- `dense_rank()` - Rank the current row within its partition without gaps.
-`lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
+- `lagInFrame(x)` - Return a value evaluated at the row that is at a specified physical offset row before the current row within the ordered frame.
-`leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.
+- `leadInFrame(x)` - Return a value evaluated at the row that is offset rows after the current row within the ordered frame.

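As an illustration of several of the functions listed above, here is a sketch using a named window; the `employees` table and its columns are made up for the example.

```sql
-- Rank employees by salary within each department using one shared window.
SELECT
    department,
    name,
    salary,
    row_number() OVER w AS rn,
    rank()       OVER w AS rnk,
    dense_rank() OVER w AS dense_rnk
FROM employees
WINDOW w AS (PARTITION BY department ORDER BY salary DESC);
```
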
```text
PARTITION

@ -2,6 +2,3 @@ position: 1
label: 'Введение'
collapsible: true
collapsed: true
-link:
-  type: generated-index
-  title: Введение

docs/ru/introduction/index.md (new file, 13 lines)
@ -0,0 +1,13 @@
---
slug: /ru/introduction/
sidebar_label: "Введение"
sidebar_position: 8
---

# Introduction

This section contains information on how to get started with ClickHouse.

- [Distinctive features of ClickHouse](./distinctive-features.md)
- [Performance](./performance.md)
- [ClickHouse history](./history.md)

@ -277,8 +277,10 @@ ClickHouse checks the conditions for `min_part_size` and `min_part

If `true`, each dictionary is created on first use. If a dictionary could not be created, the function that uses the dictionary throws an exception.

-If `false`, all dictionaries are created at server startup; if a dictionary or dictionaries take too long to create or are created with an error, the server starts without
-these dictionaries and keeps trying to create them.
+If `false`, the server starts loading all dictionaries at server startup.
+Dictionaries are loaded in the background. The server does not wait at startup until the dictionaries finish loading
+(exception: if `wait_dictionaries_load_at_startup` is set to `true` - see below).
+When a dictionary is used in a query for the first time, the query waits until the dictionary finishes loading if it is not loaded yet.

Default: `true`.

|
|||||||
<users_config>users.xml</users_config>
|
<users_config>users.xml</users_config>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## wait_dictionaries_load_at_startup {#wait_dictionaries_load_at_startup}
|
||||||
|
|
||||||
|
Если `false`, то сервер не будет ждать на старте, пока словари закончат загружаться.
|
||||||
|
Это позволяет ClickHouse стартовать быстрее.
|
||||||
|
|
||||||
|
Если `true`, то ClickHouse будет ждать на старте до окончания загрузки всех словарей (успешно или нет)
|
||||||
|
перед тем, как начать принимать соединения.
|
||||||
|
Это может привести к медленному старту ClickHouse, однако после этого некоторые запросы могут выполняться быстрее
|
||||||
|
(потому что им не придется ждать окончания загрузки используемых словарей).
|
||||||
|
|
||||||
|
По умолчанию - `false`.
|
||||||
|
|
||||||
|
**Пример**
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<wait_dictionaries_load_at_startup>false</wait_dictionaries_load_at_startup>
|
||||||
|
```
|
||||||
|
|
||||||
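A hedged illustration of how the effect of this setting could be observed after startup; the query is not part of the patch, it simply reads `system.dictionaries`.

```sql
-- After startup, list any dictionaries that are not in the LOADED state yet
-- (an empty result means everything finished loading).
SELECT name, status, last_exception
FROM system.dictionaries
WHERE status != 'LOADED';
```
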
## zookeeper {#server-settings_zookeeper}

Contains settings that allow ClickHouse to interact with a [ZooKeeper](http://zookeeper.apache.org/) cluster.

docs/ru/operations/utilities/backupview.md (new file, 50 lines)
@ -0,0 +1,50 @@
---
slug: /en/operations/utilities/backupview
title: clickhouse_backupview
---

# clickhouse_backupview {#clickhouse_backupview}

A Python module for analyzing backups created by the [BACKUP](https://clickhouse.com/docs/ru/operations/backup) command.
The main idea of this module is to allow extracting information from a backup without running the RESTORE command.

This module contains functions for
- getting the list of files inside a backup
- reading files from a backup
- getting human-readable information about the databases, tables, and parts contained in a backup
- checking the integrity of a backup

## Example:

```python
from clickhouse_backupview import open_backup, S3, FileInfo

# Open a backup. A local path can also be used:
# backup = open_backup("/backups/my_backup_1/")
backup = open_backup(S3("uri", "access_key_id", "secret_access_key"))

# Get the list of databases inside the backup.
print(backup.get_databases())

# Get the list of tables inside the backup,
# and for each table get its definition as well as the list of parts and partitions.
for db in backup.get_databases():
    for tbl in backup.get_tables(database=db):
        print(backup.get_create_query(database=db, table=tbl))
        print(backup.get_partitions(database=db, table=tbl))
        print(backup.get_parts(database=db, table=tbl))

# Extract everything from the backup.
backup.extract_all(table="mydb.mytable", out='/tmp/my_backup_1/all/')

# Extract the data of a specific table.
backup.extract_table_data(table="mydb.mytable", out='/tmp/my_backup_1/mytable/')

# Extract a single partition from the backup.
backup.extract_table_data(table="mydb.mytable", partition="202201", out='/tmp/my_backup_1/202201/')

# Extract a single part from the backup.
backup.extract_table_data(table="mydb.mytable", part="202201_100_200_3", out='/tmp/my_backup_1/202201_100_200_3/')
```

For more examples, see the [test](https://github.com/ClickHouse/ClickHouse/blob/master/utils/backupview/test/test.py).
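For context, a backup that this module can inspect might be produced with a query like the following sketch; the table name, bucket URI, and credentials are placeholders, not values from the patch.

```sql
-- Create a backup in S3; the resulting layout is what clickhouse_backupview reads.
BACKUP TABLE mydb.mytable
    TO S3('https://<bucket>.s3.amazonaws.com/my_backup_1/', '<access_key_id>', '<secret_access_key>');
```
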
@ -13,3 +13,4 @@ sidebar_position: 56
- [ClickHouse obfuscator](../../operations/utilities/clickhouse-obfuscator.md) — obfuscates data.
- [ClickHouse compressor](../../operations/utilities/clickhouse-compressor.md) — compresses and decompresses data.
- [clickhouse-odbc-bridge](../../operations/utilities/odbc-bridge.md) — a proxy server for ODBC.
+- [clickhouse_backupview](../../operations/utilities/backupview.md) — a Python module for analyzing ClickHouse backups.

@ -1,3 +1,5 @@
+add_compile_options($<$<OR:$<COMPILE_LANGUAGE:C>,$<COMPILE_LANGUAGE:CXX>>:${COVERAGE_FLAGS}>)
+
if (USE_CLANG_TIDY)
    set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
endif ()

@ -432,6 +434,11 @@ if (USE_BINARY_HASH)
    add_custom_command(TARGET clickhouse POST_BUILD COMMAND ./clickhouse hash-binary > hash && ${OBJCOPY_PATH} --add-section .clickhouse.hash=hash clickhouse COMMENT "Adding section '.clickhouse.hash' to clickhouse binary" VERBATIM)
endif()

+if (CHECK_LARGE_OBJECT_SIZES)
+    add_custom_command(TARGET clickhouse POST_BUILD
+        COMMAND "${CMAKE_SOURCE_DIR}/utils/check-style/check-large-objects.sh" "${CMAKE_BINARY_DIR}")
+endif ()
+
if (SPLIT_DEBUG_SYMBOLS)
    clickhouse_split_debug_symbols(TARGET clickhouse DESTINATION_DIR ${CMAKE_CURRENT_BINARY_DIR}/${SPLITTED_DEBUG_SYMBOLS_DIR} BINARY_PATH clickhouse)
else()