Merge branch 'master' into keeper-batch-flushes
Commit 59557ddd7c
.github/workflows/backport_branches.yml (vendored, 3 changes)
@@ -3,6 +3,9 @@ name: BackportPR
env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  # Export system tables to ClickHouse Cloud
  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

on: # yamllint disable-line rule:truthy
  push:
.github/workflows/master.yml (vendored, 46 changes)

@@ -3,6 +3,9 @@ name: MasterCI
env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  # Export system tables to ClickHouse Cloud
  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

on: # yamllint disable-line rule:truthy
  push:
@@ -892,6 +895,48 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  BuilderBinS390X:
    needs: [DockerHubPush]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          BUILD_NAME=binary_s390x
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          submodules: true
          fetch-depth: 0 # otherwise we will have no info about contributors
      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -975,6 +1020,7 @@ jobs:
      - BuilderBinFreeBSD
      - BuilderBinPPC64
      - BuilderBinRISCV64
      - BuilderBinS390X
      - BuilderBinAmd64Compat
      - BuilderBinAarch64V80Compat
      - BuilderBinClangTidy
.github/workflows/pull_request.yml (vendored, 81 changes)

@@ -3,6 +3,9 @@ name: PullRequestCI
env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  # Export system tables to ClickHouse Cloud
  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

on: # yamllint disable-line rule:truthy
  pull_request:
@@ -952,6 +955,47 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
  BuilderBinS390X:
    needs: [DockerHubPush, FastTest, StyleCheck]
    runs-on: [self-hosted, builder]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/build_check
          IMAGES_PATH=${{runner.temp}}/images_path
          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
          CACHES_PATH=${{runner.temp}}/../ccaches
          BUILD_NAME=binary_s390x
          EOF
      - name: Download changed images
        uses: actions/download-artifact@v3
        with:
          name: changed_images
          path: ${{ env.IMAGES_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
          submodules: true
      - name: Build
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
      - name: Upload build URLs to artifacts
        if: ${{ success() || failure() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ env.BUILD_URLS }}
          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@@ -1034,6 +1078,7 @@ jobs:
      - BuilderBinFreeBSD
      - BuilderBinPPC64
      - BuilderBinRISCV64
      - BuilderBinS390X
      - BuilderBinAmd64Compat
      - BuilderBinAarch64V80Compat
      - BuilderBinClangTidy
@@ -5182,3 +5227,39 @@ jobs:
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### SQL TEST ###############################################
##############################################################################################
  SQLTest:
    needs: [BuilderDebRelease]
    runs-on: [self-hosted, fuzzer-unit-tester]
    steps:
      - name: Set envs
        run: |
          cat >> "$GITHUB_ENV" << 'EOF'
          TEMP_PATH=${{runner.temp}}/sqltest
          REPORTS_PATH=${{runner.temp}}/reports_dir
          CHECK_NAME=SQLTest
          REPO_COPY=${{runner.temp}}/sqltest/ClickHouse
          EOF
      - name: Download json reports
        uses: actions/download-artifact@v3
        with:
          path: ${{ env.REPORTS_PATH }}
      - name: Check out repository code
        uses: ClickHouse/checkout@v1
        with:
          clear-repository: true
      - name: SQLTest
        run: |
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
          cd "$REPO_COPY/tests/ci"
          python3 sqltest.py "$CHECK_NAME"
      - name: Cleanup
        if: always()
        run: |
          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
          sudo rm -fr "$TEMP_PATH"
.github/workflows/release_branches.yml (vendored, 3 changes)

@@ -3,6 +3,9 @@ name: ReleaseBranchCI
env:
  # Force the stdout and stderr streams to be unbuffered
  PYTHONUNBUFFERED: 1
  # Export system tables to ClickHouse Cloud
  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

on: # yamllint disable-line rule:truthy
  push:
.gitmodules (vendored, 12 changes)

@@ -347,3 +347,15 @@
[submodule "contrib/incbin"]
	path = contrib/incbin
	url = https://github.com/graphitemaster/incbin.git
[submodule "contrib/usearch"]
	path = contrib/usearch
	url = https://github.com/unum-cloud/usearch.git
[submodule "contrib/SimSIMD"]
	path = contrib/SimSIMD
	url = https://github.com/ashvardanian/SimSIMD.git
[submodule "contrib/FP16"]
	path = contrib/FP16
	url = https://github.com/Maratyszcza/FP16.git
[submodule "contrib/robin-map"]
	path = contrib/robin-map
	url = https://github.com/Tessil/robin-map.git
@@ -52,7 +52,6 @@
* Add new setting `disable_url_encoding` that allows to disable decoding/encoding path in uri in URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).
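A sketch of how the `disable_url_encoding` setting from the entry above might be used; the URL, format, and column list here are hypothetical, and the exact behaviour should be checked against the linked PR.

```bash
# Hypothetical query: keep the path exactly as written instead of re-encoding "%20".
clickhouse-client --query "
    SELECT count()
    FROM url('https://example.com/data/file%20name.csv', 'CSV', 'id UInt64, value String')
    SETTINGS disable_url_encoding = 1
"
```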
#### Performance Improvement
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
* Enable automatic selection of the sparse serialization format by default. It improves performance. The format is supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
* Improves performance of some queries by tuning allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).
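To illustrate the sparse serialization entry above: a minimal sketch of opting a single MergeTree table out of the new default, using the setting named in the changelog; the table name and columns are made up for the example.

```bash
# Hypothetical table; setting the ratio to 1 effectively disables sparse serialization for it.
clickhouse-client --query "
    CREATE TABLE events (key UInt64, payload String)
    ENGINE = MergeTree
    ORDER BY key
    SETTINGS ratio_of_defaults_for_sparse_serialization = 1
"
```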
@@ -114,6 +113,7 @@
* Now interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).

#### Experimental Feature
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)). This is controlled by the setting `output_format_parquet_use_custom_encoder` which is disabled by default, because the feature is non-ideal.
* Added support for [PRQL](https://prql-lang.org/) as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Allow to add disk name for custom disks. Previously custom disks would use an internal generated disk name. Now it will be possible with `disk = disk_<name>(...)` (e.g. disk will have name `name`). [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)). This syntax can be changed in this release.
* (experimental MaterializedMySQL) Fixed crash when `mysqlxx::Pool::Entry` is used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).
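A sketch of the custom-disk naming syntax mentioned above (`disk = disk_<name>(...)`); the disk name and every parameter inside the parentheses are illustrative assumptions, not taken from this commit.

```bash
# Hypothetical: a named custom disk "s3_cache" layered over an existing "s3_disk";
# all parameters inside disk_s3_cache(...) are assumptions for illustration only.
clickhouse-client --query "
    CREATE TABLE hits (key UInt64, value String)
    ENGINE = MergeTree
    ORDER BY key
    SETTINGS disk = disk_s3_cache(type = cache, disk = 's3_disk', path = '/var/lib/clickhouse/caches/', max_size = '10Gi')
"
```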
@@ -208,9 +208,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
    "Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
    ${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})

if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
    set(USE_DEBUG_HELPERS ON)
endif()
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})

option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)
@@ -3,6 +3,7 @@
#include <magic_enum.hpp>
#include <fmt/format.h>


template <class T> concept is_enum = std::is_enum_v<T>;

namespace detail
@@ -7,8 +7,6 @@
#include <base/find_symbols.h>
#include <base/preciseExp10.h>

#include <iostream>

#define JSON_MAX_DEPTH 100
@@ -11,6 +11,7 @@
#include <base/defines.h>
#include <base/types.h>
#include <base/unaligned.h>
#include <base/simd.h>

#include <city.h>

@@ -29,6 +30,11 @@
#define CRC_INT __crc32cd
#endif

#if defined(__aarch64__) && defined(__ARM_NEON)
#include <arm_neon.h>
#pragma clang diagnostic ignored "-Wreserved-identifier"
#endif

/**
 * The std::string_view-like container to avoid creating strings to find substrings in the hash table.
@@ -74,14 +80,14 @@ using StringRefs = std::vector<StringRef>;
 * For more information, see hash_map_string_2.cpp
 */

inline bool compareSSE2(const char * p1, const char * p2)
inline bool compare8(const char * p1, const char * p2)
{
    return 0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(p1)),
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(p2))));
}

inline bool compareSSE2x4(const char * p1, const char * p2)
inline bool compare64(const char * p1, const char * p2)
{
    return 0xFFFF == _mm_movemask_epi8(
        _mm_and_si128(
@@ -101,7 +107,30 @@ inline bool compareSSE2x4(const char * p1, const char * p2)
            _mm_loadu_si128(reinterpret_cast<const __m128i *>(p2) + 3)))));
}

inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)
#elif defined(__aarch64__) && defined(__ARM_NEON)

inline bool compare8(const char * p1, const char * p2)
{
    uint64_t mask = getNibbleMask(vceqq_u8(
        vld1q_u8(reinterpret_cast<const unsigned char *>(p1)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2))));
    return 0xFFFFFFFFFFFFFFFF == mask;
}

inline bool compare64(const char * p1, const char * p2)
{
    uint64_t mask = getNibbleMask(vandq_u8(
        vandq_u8(vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2))),
                 vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 16)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 16)))),
        vandq_u8(vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 32)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 32))),
                 vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 48)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 48))))));
    return 0xFFFFFFFFFFFFFFFF == mask;
}

#endif

#if defined(__SSE2__) || (defined(__aarch64__) && defined(__ARM_NEON))

inline bool memequalWide(const char * p1, const char * p2, size_t size)
{
    /** The order of branches and the trick with overlapping comparisons
      * are the same as in memcpy implementation.
@@ -138,7 +167,7 @@ inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)

    while (size >= 64)
    {
        if (compareSSE2x4(p1, p2))
        if (compare64(p1, p2))
        {
            p1 += 64;
            p2 += 64;
@@ -150,17 +179,16 @@ inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)

    switch (size / 16)
    {
        case 3: if (!compareSSE2(p1 + 32, p2 + 32)) return false; [[fallthrough]];
        case 2: if (!compareSSE2(p1 + 16, p2 + 16)) return false; [[fallthrough]];
        case 1: if (!compareSSE2(p1, p2)) return false;
        case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]];
        case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]];
        case 1: if (!compare8(p1, p2)) return false;
    }

    return compareSSE2(p1 + size - 16, p2 + size - 16);
    return compare8(p1 + size - 16, p2 + size - 16);
}

#endif


inline bool operator== (StringRef lhs, StringRef rhs)
{
    if (lhs.size != rhs.size)
@@ -169,8 +197,8 @@ inline bool operator== (StringRef lhs, StringRef rhs)
    if (lhs.size == 0)
        return true;

#if defined(__SSE2__)
    return memequalSSE2Wide(lhs.data, rhs.data, lhs.size);
#if defined(__SSE2__) || (defined(__aarch64__) && defined(__ARM_NEON))
    return memequalWide(lhs.data, rhs.data, lhs.size);
#else
    return 0 == memcmp(lhs.data, rhs.data, lhs.size);
#endif
base/base/simd.h (new file, 14 lines)

@@ -0,0 +1,14 @@
#pragma once

#if defined(__aarch64__) && defined(__ARM_NEON)

# include <arm_neon.h>
# pragma clang diagnostic ignored "-Wreserved-identifier"

/// Returns a 64 bit mask of nibbles (4 bits for each byte).
inline uint64_t getNibbleMask(uint8x16_t res)
{
    return vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(vreinterpretq_u16_u8(res), 4)), 0);
}

#endif
@@ -12,7 +12,6 @@
#include <tuple>
#include <limits>

#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/special_functions/fpclassify.hpp>

// NOLINTBEGIN(*)
@@ -22,6 +21,7 @@
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
#include <boost/multiprecision/cpp_bin_float.hpp>
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;
@@ -19,7 +19,6 @@
#include "Poco/UTF16Encoding.h"
#include "Poco/Buffer.h"
#include "Poco/Exception.h"
#include <iostream>

using Poco::Buffer;

@@ -97,7 +97,7 @@ namespace Data
///
/// static void extract(std::size_t pos, Person& obj, const Person& defVal, AbstractExtractor::Ptr pExt)
/// {
///     // defVal is the default person we should use if we encunter NULL entries, so we take the individual fields
///     // defVal is the default person we should use if we encounter NULL entries, so we take the individual fields
///     // as defaults. You can do more complex checking, ie return defVal if only one single entry of the fields is null etc...
///     poco_assert_dbg (!pExt.isNull());
///     std::string lastName;

@@ -16,7 +16,6 @@
#include "Poco/TaskManager.h"
#include "Poco/Exception.h"

#include <iostream>
#include <array>

@@ -14,7 +14,6 @@

#include "Poco/JSON/Object.h"
#include <iostream>
#include <sstream>

using Poco::Dynamic::Var;

@@ -26,7 +26,6 @@
#include "Poco/CountingStream.h"
#include "Poco/RegularExpression.h"
#include <sstream>
#include <iostream>

using Poco::NumberFormatter;
@@ -146,7 +146,7 @@ namespace Net

    std::string cipherList;
    /// Specifies the supported ciphers in OpenSSL notation.
    /// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH".
    /// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH".

    std::string dhParamsFile;
    /// Specifies a file containing Diffie-Hellman parameters.
@@ -172,7 +172,7 @@
    VerificationMode verificationMode = VERIFY_RELAXED,
    int verificationDepth = 9,
    bool loadDefaultCAs = false,
    const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
    const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
    /// Creates a Context.
    ///
    /// * usage specifies whether the context is used by a client or server.
@@ -200,7 +200,7 @@
    VerificationMode verificationMode = VERIFY_RELAXED,
    int verificationDepth = 9,
    bool loadDefaultCAs = false,
    const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
    const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
    /// Creates a Context.
    ///
    /// * usage specifies whether the context is used by a client or server.

@@ -76,7 +76,7 @@ namespace Net
///     <verificationMode>none|relaxed|strict|once</verificationMode>
///     <verificationDepth>1..9</verificationDepth>
///     <loadDefaultCAFile>true|false</loadDefaultCAFile>
///     <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH</cipherList>
///     <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH</cipherList>
///     <preferServerCiphers>true|false</preferServerCiphers>
///     <privateKeyPassphraseHandler>
///         <name>KeyFileHandler</name>

@@ -41,7 +41,7 @@ Context::Params::Params():
    verificationMode(VERIFY_RELAXED),
    verificationDepth(9),
    loadDefaultCAs(false),
    cipherList("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH")
    cipherList("ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH")
{
}
@@ -20,6 +20,9 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")

set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

@@ -47,7 +47,7 @@ if (CMAKE_CROSSCOMPILING)
        set (ENABLE_RUST OFF CACHE INTERNAL "")
    elseif (ARCH_S390X)
        set (ENABLE_GRPC OFF CACHE INTERNAL "")
        set (ENABLE_SENTRY OFF CACHE INTERNAL "")
        set (ENABLE_RUST OFF CACHE INTERNAL "")
    endif ()
elseif (OS_FREEBSD)
    # FIXME: broken dependencies
contrib/CMakeLists.txt (vendored, 11 changes)

@@ -196,6 +196,17 @@ if (ARCH_S390X)
    add_contrib(crc32-s390x-cmake crc32-s390x)
endif()
add_contrib (annoy-cmake annoy)

option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
    add_contrib (FP16-cmake FP16)
    add_contrib (robin-map-cmake robin-map)
    add_contrib (SimSIMD-cmake SimSIMD)
    add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD
else ()
    message(STATUS "Not using USearch")
endif ()

add_contrib (xxHash-cmake xxHash)

add_contrib (libbcrypt-cmake libbcrypt)
contrib/FP16 (vendored, new submodule)

@@ -0,0 +1 @@
Subproject commit 0a92994d729ff76a58f692d3028ca1b64b145d91

contrib/FP16-cmake/CMakeLists.txt (new file, 1 line)

@@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt

contrib/SimSIMD (vendored, new submodule)

@@ -0,0 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf

contrib/SimSIMD-cmake/CMakeLists.txt (new file, 1 line)

@@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt

contrib/base64 (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit 9499e0c4945589973b9ea1bc927377cfbc84aa46
Subproject commit 8628e258090f9eb76d90ac3c91e1ab4690e9aa11

contrib/boost (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit aec12eea7fc762721ae16943d1361340c66c9c17
Subproject commit 063a9372b4ae304e869a5c5724971d0501552731
@@ -19,6 +19,12 @@ add_library (_boost_filesystem ${SRCS_FILESYSTEM})
add_library (boost::filesystem ALIAS _boost_filesystem)
target_include_directories (_boost_filesystem SYSTEM BEFORE PUBLIC ${LIBRARY_DIR})

if (OS_LINUX)
    target_compile_definitions (_boost_filesystem PRIVATE
        BOOST_FILESYSTEM_HAS_POSIX_AT_APIS=1
    )
endif ()

# headers-only

add_library (_boost_headers_only INTERFACE)

@@ -172,9 +178,9 @@ endif()
# coroutine

set (SRCS_COROUTINE
    "${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp"
    "${LIBRARY_DIR}/libs/coroutine/exceptions.cpp"
    "${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/detail/coroutine_context.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/exceptions.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/posix/stack_traits.cpp"
)
add_library (_boost_coroutine ${SRCS_COROUTINE})
add_library (boost::coroutine ALIAS _boost_coroutine)
@@ -73,8 +73,8 @@ struct uint128

    uint128() = default;
    uint128(uint64 low64_, uint64 high64_) : low64(low64_), high64(high64_) {}
    friend bool operator ==(const uint128 & x, const uint128 & y) { return (x.low64 == y.low64) && (x.high64 == y.high64); }
    friend bool operator !=(const uint128 & x, const uint128 & y) { return !(x == y); }

    friend auto operator<=>(const uint128 &, const uint128 &) = default;
};

inline uint64 Uint128Low64(const uint128 & x) { return x.low64; }

contrib/curl (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit b0edf0b7dae44d9e66f270a257cf654b35d5263d
Subproject commit eb3b049df526bf125eda23218e680ce7fa9ec46c
@ -8,125 +8,122 @@ endif()
|
||||
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/curl")
|
||||
|
||||
set (SRCS
|
||||
"${LIBRARY_DIR}/lib/fopen.c"
|
||||
"${LIBRARY_DIR}/lib/noproxy.c"
|
||||
"${LIBRARY_DIR}/lib/idn.c"
|
||||
"${LIBRARY_DIR}/lib/cfilters.c"
|
||||
"${LIBRARY_DIR}/lib/cf-socket.c"
|
||||
"${LIBRARY_DIR}/lib/altsvc.c"
|
||||
"${LIBRARY_DIR}/lib/amigaos.c"
|
||||
"${LIBRARY_DIR}/lib/asyn-thread.c"
|
||||
"${LIBRARY_DIR}/lib/base64.c"
|
||||
"${LIBRARY_DIR}/lib/bufq.c"
|
||||
"${LIBRARY_DIR}/lib/bufref.c"
|
||||
"${LIBRARY_DIR}/lib/cf-h1-proxy.c"
|
||||
"${LIBRARY_DIR}/lib/cf-haproxy.c"
|
||||
"${LIBRARY_DIR}/lib/cf-https-connect.c"
|
||||
"${LIBRARY_DIR}/lib/file.c"
|
||||
"${LIBRARY_DIR}/lib/timeval.c"
|
||||
"${LIBRARY_DIR}/lib/base64.c"
|
||||
"${LIBRARY_DIR}/lib/hostip.c"
|
||||
"${LIBRARY_DIR}/lib/progress.c"
|
||||
"${LIBRARY_DIR}/lib/formdata.c"
|
||||
"${LIBRARY_DIR}/lib/cookie.c"
|
||||
"${LIBRARY_DIR}/lib/http.c"
|
||||
"${LIBRARY_DIR}/lib/sendf.c"
|
||||
"${LIBRARY_DIR}/lib/url.c"
|
||||
"${LIBRARY_DIR}/lib/dict.c"
|
||||
"${LIBRARY_DIR}/lib/if2ip.c"
|
||||
"${LIBRARY_DIR}/lib/speedcheck.c"
|
||||
"${LIBRARY_DIR}/lib/ldap.c"
|
||||
"${LIBRARY_DIR}/lib/version.c"
|
||||
"${LIBRARY_DIR}/lib/getenv.c"
|
||||
"${LIBRARY_DIR}/lib/escape.c"
|
||||
"${LIBRARY_DIR}/lib/mprintf.c"
|
||||
"${LIBRARY_DIR}/lib/telnet.c"
|
||||
"${LIBRARY_DIR}/lib/netrc.c"
|
||||
"${LIBRARY_DIR}/lib/getinfo.c"
|
||||
"${LIBRARY_DIR}/lib/transfer.c"
|
||||
"${LIBRARY_DIR}/lib/strcase.c"
|
||||
"${LIBRARY_DIR}/lib/easy.c"
|
||||
"${LIBRARY_DIR}/lib/curl_fnmatch.c"
|
||||
"${LIBRARY_DIR}/lib/curl_log.c"
|
||||
"${LIBRARY_DIR}/lib/fileinfo.c"
|
||||
"${LIBRARY_DIR}/lib/krb5.c"
|
||||
"${LIBRARY_DIR}/lib/memdebug.c"
|
||||
"${LIBRARY_DIR}/lib/http_chunks.c"
|
||||
"${LIBRARY_DIR}/lib/strtok.c"
|
||||
"${LIBRARY_DIR}/lib/cf-socket.c"
|
||||
"${LIBRARY_DIR}/lib/cfilters.c"
|
||||
"${LIBRARY_DIR}/lib/conncache.c"
|
||||
"${LIBRARY_DIR}/lib/connect.c"
|
||||
"${LIBRARY_DIR}/lib/llist.c"
|
||||
"${LIBRARY_DIR}/lib/hash.c"
|
||||
"${LIBRARY_DIR}/lib/multi.c"
|
||||
"${LIBRARY_DIR}/lib/content_encoding.c"
|
||||
"${LIBRARY_DIR}/lib/share.c"
|
||||
"${LIBRARY_DIR}/lib/http_digest.c"
|
||||
"${LIBRARY_DIR}/lib/md4.c"
|
||||
"${LIBRARY_DIR}/lib/md5.c"
|
||||
"${LIBRARY_DIR}/lib/http_negotiate.c"
|
||||
"${LIBRARY_DIR}/lib/inet_pton.c"
|
||||
"${LIBRARY_DIR}/lib/strtoofft.c"
|
||||
"${LIBRARY_DIR}/lib/strerror.c"
|
||||
"${LIBRARY_DIR}/lib/amigaos.c"
|
||||
"${LIBRARY_DIR}/lib/cookie.c"
|
||||
"${LIBRARY_DIR}/lib/curl_addrinfo.c"
|
||||
"${LIBRARY_DIR}/lib/curl_des.c"
|
||||
"${LIBRARY_DIR}/lib/curl_endian.c"
|
||||
"${LIBRARY_DIR}/lib/curl_fnmatch.c"
|
||||
"${LIBRARY_DIR}/lib/curl_get_line.c"
|
||||
"${LIBRARY_DIR}/lib/curl_gethostname.c"
|
||||
"${LIBRARY_DIR}/lib/curl_gssapi.c"
|
||||
"${LIBRARY_DIR}/lib/curl_memrchr.c"
|
||||
"${LIBRARY_DIR}/lib/curl_multibyte.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_core.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
|
||||
"${LIBRARY_DIR}/lib/curl_path.c"
|
||||
"${LIBRARY_DIR}/lib/curl_range.c"
|
||||
"${LIBRARY_DIR}/lib/curl_rtmp.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sasl.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sspi.c"
|
||||
"${LIBRARY_DIR}/lib/curl_threads.c"
|
||||
"${LIBRARY_DIR}/lib/curl_trc.c"
|
||||
"${LIBRARY_DIR}/lib/dict.c"
|
||||
"${LIBRARY_DIR}/lib/doh.c"
|
||||
"${LIBRARY_DIR}/lib/dynbuf.c"
|
||||
"${LIBRARY_DIR}/lib/dynhds.c"
|
||||
"${LIBRARY_DIR}/lib/easy.c"
|
||||
"${LIBRARY_DIR}/lib/escape.c"
|
||||
"${LIBRARY_DIR}/lib/file.c"
|
||||
"${LIBRARY_DIR}/lib/fileinfo.c"
|
||||
"${LIBRARY_DIR}/lib/fopen.c"
|
||||
"${LIBRARY_DIR}/lib/formdata.c"
|
||||
"${LIBRARY_DIR}/lib/getenv.c"
|
||||
"${LIBRARY_DIR}/lib/getinfo.c"
|
||||
"${LIBRARY_DIR}/lib/gopher.c"
|
||||
"${LIBRARY_DIR}/lib/hash.c"
|
||||
"${LIBRARY_DIR}/lib/headers.c"
|
||||
"${LIBRARY_DIR}/lib/hmac.c"
|
||||
"${LIBRARY_DIR}/lib/hostasyn.c"
|
||||
"${LIBRARY_DIR}/lib/hostip.c"
|
||||
"${LIBRARY_DIR}/lib/hostip4.c"
|
||||
"${LIBRARY_DIR}/lib/hostip6.c"
|
||||
"${LIBRARY_DIR}/lib/hostsyn.c"
|
||||
"${LIBRARY_DIR}/lib/hsts.c"
|
||||
"${LIBRARY_DIR}/lib/http.c"
|
||||
"${LIBRARY_DIR}/lib/http2.c"
|
||||
"${LIBRARY_DIR}/lib/http_aws_sigv4.c"
|
||||
"${LIBRARY_DIR}/lib/http_chunks.c"
|
||||
"${LIBRARY_DIR}/lib/http_digest.c"
|
||||
"${LIBRARY_DIR}/lib/http_negotiate.c"
|
||||
"${LIBRARY_DIR}/lib/http_ntlm.c"
|
||||
"${LIBRARY_DIR}/lib/http_proxy.c"
|
||||
"${LIBRARY_DIR}/lib/idn.c"
|
||||
"${LIBRARY_DIR}/lib/if2ip.c"
|
||||
"${LIBRARY_DIR}/lib/imap.c"
|
||||
"${LIBRARY_DIR}/lib/inet_ntop.c"
|
||||
"${LIBRARY_DIR}/lib/inet_pton.c"
|
||||
"${LIBRARY_DIR}/lib/krb5.c"
|
||||
"${LIBRARY_DIR}/lib/ldap.c"
|
||||
"${LIBRARY_DIR}/lib/llist.c"
|
||||
"${LIBRARY_DIR}/lib/md4.c"
|
||||
"${LIBRARY_DIR}/lib/md5.c"
|
||||
"${LIBRARY_DIR}/lib/memdebug.c"
|
||||
"${LIBRARY_DIR}/lib/mime.c"
|
||||
"${LIBRARY_DIR}/lib/mprintf.c"
|
||||
"${LIBRARY_DIR}/lib/mqtt.c"
|
||||
"${LIBRARY_DIR}/lib/multi.c"
|
||||
"${LIBRARY_DIR}/lib/netrc.c"
|
||||
"${LIBRARY_DIR}/lib/nonblock.c"
|
||||
"${LIBRARY_DIR}/lib/noproxy.c"
|
||||
"${LIBRARY_DIR}/lib/openldap.c"
|
||||
"${LIBRARY_DIR}/lib/parsedate.c"
|
||||
"${LIBRARY_DIR}/lib/pingpong.c"
|
||||
"${LIBRARY_DIR}/lib/pop3.c"
|
||||
"${LIBRARY_DIR}/lib/progress.c"
|
||||
"${LIBRARY_DIR}/lib/psl.c"
|
||||
"${LIBRARY_DIR}/lib/rand.c"
|
||||
"${LIBRARY_DIR}/lib/rename.c"
|
||||
"${LIBRARY_DIR}/lib/rtsp.c"
|
||||
"${LIBRARY_DIR}/lib/select.c"
|
||||
"${LIBRARY_DIR}/lib/splay.c"
|
||||
"${LIBRARY_DIR}/lib/strdup.c"
|
||||
"${LIBRARY_DIR}/lib/sendf.c"
|
||||
"${LIBRARY_DIR}/lib/setopt.c"
|
||||
"${LIBRARY_DIR}/lib/sha256.c"
|
||||
"${LIBRARY_DIR}/lib/share.c"
|
||||
"${LIBRARY_DIR}/lib/slist.c"
|
||||
"${LIBRARY_DIR}/lib/smb.c"
|
||||
"${LIBRARY_DIR}/lib/smtp.c"
|
||||
"${LIBRARY_DIR}/lib/socketpair.c"
|
||||
"${LIBRARY_DIR}/lib/socks.c"
|
||||
"${LIBRARY_DIR}/lib/curl_addrinfo.c"
|
||||
"${LIBRARY_DIR}/lib/socks_gssapi.c"
|
||||
"${LIBRARY_DIR}/lib/socks_sspi.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sspi.c"
|
||||
"${LIBRARY_DIR}/lib/slist.c"
|
||||
"${LIBRARY_DIR}/lib/nonblock.c"
|
||||
"${LIBRARY_DIR}/lib/curl_memrchr.c"
|
||||
"${LIBRARY_DIR}/lib/imap.c"
|
||||
"${LIBRARY_DIR}/lib/pop3.c"
|
||||
"${LIBRARY_DIR}/lib/smtp.c"
|
||||
"${LIBRARY_DIR}/lib/pingpong.c"
|
||||
"${LIBRARY_DIR}/lib/rtsp.c"
|
||||
"${LIBRARY_DIR}/lib/curl_threads.c"
|
||||
"${LIBRARY_DIR}/lib/warnless.c"
|
||||
"${LIBRARY_DIR}/lib/hmac.c"
|
||||
"${LIBRARY_DIR}/lib/curl_rtmp.c"
|
||||
"${LIBRARY_DIR}/lib/openldap.c"
|
||||
"${LIBRARY_DIR}/lib/curl_gethostname.c"
|
||||
"${LIBRARY_DIR}/lib/gopher.c"
|
||||
"${LIBRARY_DIR}/lib/http_proxy.c"
|
||||
"${LIBRARY_DIR}/lib/asyn-thread.c"
|
||||
"${LIBRARY_DIR}/lib/curl_gssapi.c"
|
||||
"${LIBRARY_DIR}/lib/http_ntlm.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
|
||||
"${LIBRARY_DIR}/lib/curl_ntlm_core.c"
|
||||
"${LIBRARY_DIR}/lib/curl_sasl.c"
|
||||
"${LIBRARY_DIR}/lib/rand.c"
|
||||
"${LIBRARY_DIR}/lib/curl_multibyte.c"
|
||||
"${LIBRARY_DIR}/lib/conncache.c"
|
||||
"${LIBRARY_DIR}/lib/cf-h1-proxy.c"
|
||||
"${LIBRARY_DIR}/lib/http2.c"
|
||||
"${LIBRARY_DIR}/lib/smb.c"
|
||||
"${LIBRARY_DIR}/lib/curl_endian.c"
|
||||
"${LIBRARY_DIR}/lib/curl_des.c"
|
||||
"${LIBRARY_DIR}/lib/speedcheck.c"
|
||||
"${LIBRARY_DIR}/lib/splay.c"
|
||||
"${LIBRARY_DIR}/lib/strcase.c"
|
||||
"${LIBRARY_DIR}/lib/strdup.c"
|
||||
"${LIBRARY_DIR}/lib/strerror.c"
|
||||
"${LIBRARY_DIR}/lib/strtok.c"
|
||||
"${LIBRARY_DIR}/lib/strtoofft.c"
|
||||
"${LIBRARY_DIR}/lib/system_win32.c"
|
||||
"${LIBRARY_DIR}/lib/mime.c"
|
||||
"${LIBRARY_DIR}/lib/sha256.c"
|
||||
"${LIBRARY_DIR}/lib/setopt.c"
|
||||
"${LIBRARY_DIR}/lib/curl_path.c"
|
||||
"${LIBRARY_DIR}/lib/curl_range.c"
|
||||
"${LIBRARY_DIR}/lib/psl.c"
|
||||
"${LIBRARY_DIR}/lib/doh.c"
|
||||
"${LIBRARY_DIR}/lib/urlapi.c"
|
||||
"${LIBRARY_DIR}/lib/curl_get_line.c"
|
||||
"${LIBRARY_DIR}/lib/altsvc.c"
|
||||
"${LIBRARY_DIR}/lib/socketpair.c"
|
||||
"${LIBRARY_DIR}/lib/bufref.c"
|
||||
"${LIBRARY_DIR}/lib/bufq.c"
|
||||
"${LIBRARY_DIR}/lib/dynbuf.c"
|
||||
"${LIBRARY_DIR}/lib/dynhds.c"
|
||||
"${LIBRARY_DIR}/lib/hsts.c"
|
||||
"${LIBRARY_DIR}/lib/http_aws_sigv4.c"
|
||||
"${LIBRARY_DIR}/lib/mqtt.c"
|
||||
"${LIBRARY_DIR}/lib/rename.c"
|
||||
"${LIBRARY_DIR}/lib/headers.c"
|
||||
"${LIBRARY_DIR}/lib/telnet.c"
|
||||
"${LIBRARY_DIR}/lib/timediff.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/vauth.c"
|
||||
"${LIBRARY_DIR}/lib/timeval.c"
|
||||
"${LIBRARY_DIR}/lib/transfer.c"
|
||||
"${LIBRARY_DIR}/lib/url.c"
|
||||
"${LIBRARY_DIR}/lib/urlapi.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/cleartext.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/cram.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/digest.c"
|
||||
@ -138,23 +135,24 @@ set (SRCS
|
||||
"${LIBRARY_DIR}/lib/vauth/oauth2.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/spnego_sspi.c"
|
||||
"${LIBRARY_DIR}/lib/vauth/vauth.c"
|
||||
"${LIBRARY_DIR}/lib/version.c"
|
||||
"${LIBRARY_DIR}/lib/vquic/vquic.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/openssl.c"
|
||||
"${LIBRARY_DIR}/lib/vssh/libssh.c"
|
||||
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/gtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/vtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/nss.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/wolfssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/hostcheck.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/keylog.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/mbedtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/openssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/schannel.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/sectransp.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/gskit.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/mbedtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/keylog.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/vtls.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/wolfssl.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/x509asn1.c"
|
||||
"${LIBRARY_DIR}/lib/vtls/hostcheck.c"
|
||||
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
|
||||
"${LIBRARY_DIR}/lib/vssh/libssh.c"
|
||||
"${LIBRARY_DIR}/lib/warnless.c"
|
||||
)
|
||||
|
||||
add_library (_curl ${SRCS})
|
||||
|
@@ -1,6 +1,7 @@
option(ENABLE_ISAL_LIBRARY "Enable ISA-L library" ${ENABLE_LIBRARIES})
if (ARCH_AARCH64)
    # Disable ISA-L libray on aarch64.

# ISA-L is only available for x86-64, so it shall be disabled for other platforms
if (NOT ARCH_AMD64)
    set (ENABLE_ISAL_LIBRARY OFF)
endif ()

contrib/krb5 (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit b56ce6ba690e1f320df1a64afa34980c3e462617
Subproject commit 1d5c970e9369f444caf81d1d06a231a6bad8581f
@@ -147,7 +147,7 @@ target_compile_definitions(_libarchive PUBLIC
target_compile_options(_libarchive PRIVATE "-Wno-reserved-macro-identifier")

if (TARGET ch_contrib::xz)
    target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1)
    target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1 HAVE_LIBLZMA=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::xz)
endif()

@@ -156,6 +156,16 @@ if (TARGET ch_contrib::zlib)
    target_link_libraries(_libarchive PRIVATE ch_contrib::zlib)
endif()

if (TARGET ch_contrib::zstd)
    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()

if (TARGET ch_contrib::bzip2)
    target_compile_definitions(_libarchive PUBLIC HAVE_BZLIB_H=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::bzip2)
endif()

if (OS_LINUX)
    target_compile_definitions(
        _libarchive PUBLIC

contrib/llvm-project (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit d857c707fccd50423bea1c4710dc469cf89607a9
Subproject commit e7b8befca85c8b847614432dba250c22d35fbae0
@@ -1,18 +1,16 @@
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
if (APPLE OR SANITIZE STREQUAL "undefined")
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

if (NOT ENABLE_EMBEDDED_COMPILER)
    message(STATUS "Not using LLVM")
    return()
endif()

# TODO: Enable compilation on AArch64

set (LLVM_VERSION "15.0.0bundled")
set (LLVM_INCLUDE_DIRS
    "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm/include"
@@ -58,18 +56,30 @@ set (REQUIRED_LLVM_LIBRARIES
    LLVMDemangle
)

# if (ARCH_AMD64)
if (ARCH_AMD64)
    set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
# elseif (ARCH_AARCH64)
#     list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
# endif ()
elseif (ARCH_AARCH64)
    set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
elseif (ARCH_PPC64LE)
    set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
elseif (ARCH_S390X)
    set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
elseif (ARCH_RISCV64)
    set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
endif ()

message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}")

set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "") # Skip internal compiler selection
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") # With exception handling
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "") # for x86 + ARM: "X86;AArch64"

# Omit unnecessary stuff (just the options which are ON by default)
set(LLVM_ENABLE_BACKTRACES 0 CACHE INTERNAL "")
@@ -99,15 +109,12 @@ set(LLVM_ENABLE_BINDINGS 0 CACHE INTERNAL "")
set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm")
set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")

# Since we always use toolchain files to generate hermatic builds, cmake will
# think it's a cross compilation, and LLVM will try to configure NATIVE LLVM
# targets with all tests enabled, which will slow down cmake configuration and
# compilation (You'll see Building native llvm-tblgen...). Let's disable the
# cross compiling indicator for now.
#
# TODO We should let cmake know whether it's indeed a cross compilation in the
# first place.
set (CMAKE_CROSSCOMPILING 0)
message (STATUS "LLVM CMAKE CROSS COMPILING ${CMAKE_CROSSCOMPILING}")
if (CMAKE_CROSSCOMPILING)
    set (LLVM_HOST_TRIPLE "${CMAKE_C_COMPILER_TARGET}" CACHE INTERNAL "")
    message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()

add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")

set_directory_properties (PROPERTIES
contrib/orc (vendored submodule, 2 changes)

@@ -1 +1 @@
Subproject commit 568d1d60c250af1890f226c182bc15bd8cc94cf1
Subproject commit a20d1d9d7ad4a4be7b7ba97588e16ca8b9abb2b6

contrib/robin-map (vendored, new submodule)

@@ -0,0 +1 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d

contrib/robin-map-cmake/CMakeLists.txt (new file, 1 line)

@@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt

contrib/usearch (vendored, new submodule)

@@ -0,0 +1 @@
Subproject commit 387b78b28b17b8954024ffc81e97cbcfa10d1f30

contrib/usearch-cmake/CMakeLists.txt (new file, 17 lines)

@@ -0,0 +1,17 @@
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include")

set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD-map")

add_library(_usearch INTERFACE)

target_include_directories(_usearch SYSTEM INTERFACE
    ${FP16_PROJECT_DIR}/include
    ${ROBIN_MAP_PROJECT_DIR}/include
    ${SIMSIMD_PROJECT_DIR}/include
    ${USEARCH_SOURCE_DIR})

add_library(ch_contrib::usearch ALIAS _usearch)
target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH)
@@ -1,5 +1,5 @@
## ClickHouse Dockerfiles

This directory contain Dockerfiles for `clickhouse-client` and `clickhouse-server`. They are updated in each release.
This directory contain Dockerfiles for `clickhouse-server`. They are updated in each release.

Also there is bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
Also, there is a bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
@@ -1,34 +0,0 @@
FROM ubuntu:18.04

# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=22.1.1.*

RUN apt-get update \
    && apt-get install --yes --no-install-recommends \
        apt-transport-https \
        ca-certificates \
        dirmngr \
        gnupg \
    && mkdir -p /etc/apt/sources.list.d \
    && apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
    && echo $repository > /etc/apt/sources.list.d/clickhouse.list \
    && apt-get update \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --allow-unauthenticated --yes --no-install-recommends \
            clickhouse-client=$version \
            clickhouse-common-static=$version \
            locales \
            tzdata \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf \
    && apt-get clean

RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8

ENTRYPOINT ["/usr/bin/clickhouse-client"]

@@ -1,7 +0,0 @@
# ClickHouse Client Docker Image

For more information see [ClickHouse Server Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/).

## License

View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.
@@ -125,6 +125,7 @@
            "docker/test/keeper-jepsen",
            "docker/test/server-jepsen",
            "docker/test/sqllogic",
            "docker/test/sqltest",
            "docker/test/stateless"
        ]
    },
@@ -155,13 +156,16 @@
    },
    "docker/docs/builder": {
        "name": "clickhouse/docs-builder",
        "dependent": [
        ]
        "dependent": []
    },
    "docker/test/sqllogic": {
        "name": "clickhouse/sqllogic-test",
        "dependent": []
    },
    "docker/test/sqltest": {
        "name": "clickhouse/sqltest",
        "dependent": []
    },
    "docker/test/integration/nginx_dav": {
        "name": "clickhouse/nginx-dav",
        "dependent": []

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.
@@ -58,33 +58,6 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
    rustup target add aarch64-apple-darwin && \
    rustup target add powerpc64le-unknown-linux-gnu

# Create vendor cache for cargo.
#
# Note, that the config.toml for the root is used, you will not be able to
# install any other crates, except those which had been vendored (since if
# there is "replace-with" for some source, then cargo will not look to other
# remotes except this).
#
# Notes for the command itself:
# - --chown is required to preserve the rights
# - unstable-options for -C
# - chmod is required to fix the permissions, since builds are running from a different user
# - copy of the Cargo.lock is required for proper dependencies versions
# - cargo vendor --sync is requried to overcome [1] bug.
#
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
COPY --chown=root:root /rust /rust/packages
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
    cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
    cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
    rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
    sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
    cat $CARGO_HOME/config.toml && \
    mv /rust/packages/vendor /rust/vendor && \
    chmod -R o=r+X /rust/vendor && \
    ls -R -l /rust/packages && \
    rm -r /rust/packages

# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
@@ -107,6 +80,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
# Download toolchain and SDK for Darwin
RUN curl -sL -O https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz

# Download and install mold 2.0 for s390x build
RUN curl -Lo /tmp/mold.tar.gz "https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz" \
    && mkdir /tmp/mold \
    && tar -xzf /tmp/mold.tar.gz -C /tmp/mold \
    && cp -r /tmp/mold/mold*/* /usr \
    && rm -rf /tmp/mold \
    && rm /tmp/mold.tar.gz

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.20.0

@@ -1 +0,0 @@
../../../rust
@@ -22,7 +22,7 @@ def check_image_exists_locally(image_name: str) -> bool:
        output = subprocess.check_output(
            f"docker images -q {image_name} 2> /dev/null", shell=True
        )
        return output != ""
        return output != b""
    except subprocess.CalledProcessError:
        return False

@@ -46,7 +46,7 @@ def build_image(image_name: str, filepath: Path) -> None:
    )


def pre_build(repo_path: Path, env_variables: List[str]):
def pre_build(repo_path: Path, env_variables: List[str]) -> None:
    if "WITH_PERFORMANCE=1" in env_variables:
        current_branch = subprocess.check_output(
            "git branch --show-current", shell=True, encoding="utf-8"
@@ -80,9 +80,12 @@ def run_docker_image_with_env(
    output_dir: Path,
    env_variables: List[str],
    ch_root: Path,
    cargo_cache_dir: Path,
    ccache_dir: Optional[Path],
):
) -> None:
    output_dir.mkdir(parents=True, exist_ok=True)
    cargo_cache_dir.mkdir(parents=True, exist_ok=True)

    env_part = " -e ".join(env_variables)
    if env_part:
        env_part = " -e " + env_part
@@ -104,7 +107,7 @@
    cmd = (
        f"docker run --network=host --user={user} --rm {ccache_mount}"
        f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
        f"{interactive} {image_name}"
        f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name}"
    )

    logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)
@@ -129,9 +132,10 @@ def parse_env_variables(
    version: str,
    official: bool,
    additional_pkgs: bool,
    with_profiler: bool,
    with_coverage: bool,
    with_binaries: str,
):
) -> List[str]:
    DARWIN_SUFFIX = "-darwin"
    DARWIN_ARM_SUFFIX = "-darwin-aarch64"
    ARM_SUFFIX = "-aarch64"
@@ -139,6 +143,7 @@ def parse_env_variables(
    FREEBSD_SUFFIX = "-freebsd"
    PPC_SUFFIX = "-ppc64le"
    RISCV_SUFFIX = "-riscv64"
    S390X_SUFFIX = "-s390x"
    AMD64_COMPAT_SUFFIX = "-amd64-compat"

    result = []
@@ -152,6 +157,7 @@
    is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
    is_cross_ppc = compiler.endswith(PPC_SUFFIX)
    is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
    is_cross_s390x = compiler.endswith(S390X_SUFFIX)
    is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
    is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)

@@ -213,6 +219,11 @@
        cmake_flags.append(
            "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
        )
    elif is_cross_s390x:
        cc = compiler[: -len(S390X_SUFFIX)]
        cmake_flags.append(
            "-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-s390x.cmake"
        )
    elif is_amd64_compat:
        cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
        result.append("DEB_ARCH=amd64")
@@ -322,6 +333,9 @@
        # utils are not included into clickhouse-bundle, so build everything
        build_target = "all"

    if with_profiler:
        cmake_flags.append("-DENABLE_BUILD_PROFILING=1")

    if with_coverage:
        cmake_flags.append("-DWITH_COVERAGE=1")

@@ -373,6 +387,7 @@ def parse_args() -> argparse.Namespace:
            "clang-16-aarch64-v80compat",
            "clang-16-ppc64le",
            "clang-16-riscv64",
            "clang-16-s390x",
            "clang-16-amd64-compat",
            "clang-16-freebsd",
        ),
@@ -412,10 +427,18 @@
        action="store_true",
        help="if set, the build fails on errors writing cache to S3",
    )
    parser.add_argument(
        "--cargo-cache-dir",
        default=Path(os.getenv("CARGO_HOME", "") or Path.home() / ".cargo")
        / "registry",
        type=dir_name,
        help="a directory to preserve the rust cargo crates",
    )
    parser.add_argument("--force-build-image", action="store_true")
    parser.add_argument("--version")
    parser.add_argument("--official", action="store_true")
    parser.add_argument("--additional-pkgs", action="store_true")
    parser.add_argument("--with-profiler", action="store_true")
    parser.add_argument("--with-coverage", action="store_true")
    parser.add_argument(
        "--with-binaries", choices=("programs", "tests", ""), default=""
@@ -451,7 +474,7 @@ def parse_args() -> argparse.Namespace:
    return args


def main():
def main() -> None:
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
    args = parse_args()

@@ -479,6 +502,7 @@ def main():
        args.version,
        args.official,
        args.additional_pkgs,
        args.with_profiler,
        args.with_coverage,
        args.with_binaries,
    )
@@ -490,6 +514,7 @@ def main():
        args.output_dir,
        env_prepared,
        ch_root,
        args.cargo_cache_dir,
        args.ccache_dir,
    )
    logging.info("Output placed into %s", args.output_dir)
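A sketch of how the packager script above might be invoked locally for the new s390x target. The `--cargo-cache-dir` option is the one added in this diff; the `--compiler` and `--output-dir` flag names are assumptions inferred from the argument parser shown above, not confirmed by this commit.

```bash
# Hypothetical local invocation of the packager for the new s390x build.
# --compiler and --output-dir are assumed flag names; --cargo-cache-dir is added by this diff.
python3 docker/packager/packager \
    --output-dir "$HOME/ch-build-output" \
    --compiler clang-16-s390x \
    --cargo-cache-dir "$HOME/.cargo/registry"
```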
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.

@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image

@@ -35,4 +35,7 @@ ENV LC_ALL en_US.UTF-8
ENV TZ=Europe/Amsterdam
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone

# This script is used to setup realtime export of server logs from the CI into external ClickHouse cluster:
COPY setup_export_logs.sh /

CMD sleep 1
65
docker/test/base/setup_export_logs.sh
Executable file
65
docker/test/base/setup_export_logs.sh
Executable file
@ -0,0 +1,65 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script sets up export of system log tables to a remote server.
|
||||
# Remote tables are created if they do not exist and are augmented with extra columns,
|
||||
# and their names will contain a hash of the table structure,
|
||||
# which allows exporting tables from servers of different versions.
|
||||
|
||||
# Pre-configured destination cluster to which the data is exported
|
||||
CLUSTER=${CLUSTER:=system_logs_export}
|
||||
|
||||
EXTRA_COLUMNS=${EXTRA_COLUMNS:="pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
|
||||
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:="0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
|
||||
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:="check_name, "}
|
||||
|
||||
CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}
|
||||
|
||||
# Create all configured system logs:
|
||||
clickhouse-client --query "SYSTEM FLUSH LOGS"
|
||||
|
||||
# It doesn't make sense to try creating tables if SYNC fails
|
||||
echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client --receive_timeout 180 $CONNECTION_PARAMETERS || exit 0
|
||||
|
||||
# For each system log table:
|
||||
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
|
||||
do
|
||||
# Calculate hash of its structure:
|
||||
hash=$(clickhouse-client --query "
|
||||
SELECT sipHash64(groupArray((name, type)))
|
||||
FROM (SELECT name, type FROM system.columns
|
||||
WHERE database = 'system' AND table = '$table'
|
||||
ORDER BY position)
|
||||
")
|
||||
|
||||
# Create the destination table with adapted name and structure:
|
||||
statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
|
||||
s/^\($/('"$EXTRA_COLUMNS"'/;
|
||||
s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
|
||||
s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
|
||||
/^TTL /d
|
||||
')
|
||||
|
||||
echo "Creating destination table ${table}_${hash}" >&2
|
||||
|
||||
echo "$statement" | clickhouse-client --distributed_ddl_task_timeout=10 $CONNECTION_PARAMETERS || continue
|
||||
|
||||
echo "Creating table system.${table}_sender" >&2
|
||||
|
||||
# Create a Distributed table and a materialized view to watch the original table:
|
||||
clickhouse-client --query "
|
||||
CREATE TABLE system.${table}_sender
|
||||
ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
|
||||
SETTINGS flush_on_detach=0
|
||||
EMPTY AS
|
||||
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
|
||||
FROM system.${table}
|
||||
"
|
||||
|
||||
echo "Creating materialized view system.${table}_watcher" >&2
|
||||
|
||||
clickhouse-client --query "
|
||||
CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
|
||||
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
|
||||
FROM system.${table}
|
||||
"
|
||||
done
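For illustration, a hedged sketch of what the loop above ends up creating for one table; the hash suffix, engine, and column tail below are hypothetical placeholders, not values from a real run:

```sql
-- Hypothetical destination table for system.query_log, assuming the structure
-- hash came out as 1234567890123456789 (the real suffix depends on the column list):
CREATE TABLE IF NOT EXISTS query_log_1234567890123456789
(
    -- columns prepended via EXTRA_COLUMNS
    pull_request_number UInt32,
    commit_sha String,
    check_start_time DateTime,
    check_name LowCardinality(String),
    instance_type LowCardinality(String),
    -- ...followed by the original system.query_log columns, e.g.:
    event_time DateTime
)
ENGINE = MergeTree                    -- the real engine is whatever SHOW CREATE TABLE returned
ORDER BY (check_name, event_time)     -- EXTRA_ORDER_BY_COLUMNS prepends check_name
```

The `${table}_sender` Distributed table and the `${table}_watcher` materialized view created afterwards then forward every new row of the local `system.${table}` into this destination table over the `system_logs_export` cluster.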
|
@ -148,6 +148,7 @@ function clone_submodules
|
||||
contrib/liburing
|
||||
contrib/libfiu
|
||||
contrib/incbin
|
||||
contrib/yaml-cpp
|
||||
)
|
||||
|
||||
git submodule sync
|
||||
@ -170,6 +171,7 @@ function run_cmake
|
||||
"-DENABLE_SIMDJSON=1"
|
||||
"-DENABLE_JEMALLOC=1"
|
||||
"-DENABLE_LIBURING=1"
|
||||
"-DENABLE_YAML_CPP=1"
|
||||
)
|
||||
|
||||
export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache"
|
||||
|
@ -122,6 +122,23 @@ EOL
|
||||
<core_path>$PWD</core_path>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
# Set up a cluster for logs export to ClickHouse Cloud
|
||||
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
echo "
|
||||
remote_servers:
|
||||
system_logs_export:
|
||||
shard:
|
||||
replica:
|
||||
secure: 1
|
||||
user: ci
|
||||
host: '${CLICKHOUSE_CI_LOGS_HOST}'
|
||||
port: 9440
|
||||
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
|
||||
" > db/config.d/system_logs_export.yaml
|
||||
fi
|
||||
}
|
||||
|
||||
function filter_exists_and_template
|
||||
@ -223,7 +240,22 @@ quit
|
||||
done
|
||||
clickhouse-client --query "select 1" # This checks that the server is responding
|
||||
kill -0 $server_pid # This checks that it is our server that is started and not some other one
|
||||
echo Server started and responded
|
||||
echo 'Server started and responded'
|
||||
|
||||
# Initialize export of system logs to ClickHouse Cloud
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
export EXTRA_COLUMNS_EXPRESSION="$PR_TO_TEST AS pull_request_number, '$SHA_TO_TEST' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
|
||||
# TODO: Check if the password will appear in the logs.
|
||||
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
|
||||
|
||||
/setup_export_logs.sh
|
||||
|
||||
# Unset variables after use
|
||||
export CONNECTION_PARAMETERS=''
|
||||
export CLICKHOUSE_CI_LOGS_HOST=''
|
||||
export CLICKHOUSE_CI_LOGS_PASSWORD=''
|
||||
fi
|
||||
|
||||
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
|
||||
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
|
||||
|
@ -12,6 +12,7 @@ ENV \
|
||||
# install systemd packages
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
sudo \
|
||||
systemd \
|
||||
&& \
|
||||
apt-get clean && \
|
||||
|
@ -2,7 +2,7 @@ version: "2.3"
|
||||
|
||||
services:
|
||||
coredns:
|
||||
image: coredns/coredns:latest
|
||||
image: coredns/coredns:1.9.3 # :latest broke this test
|
||||
restart: always
|
||||
volumes:
|
||||
- ${COREDNS_CONFIG_DIR}/example.com:/example.com
|
||||
|
@ -1,18 +1,7 @@
|
||||
# docker build -t clickhouse/performance-comparison .
|
||||
|
||||
# Using ubuntu:22.04 over 20.04 as all other images, since:
|
||||
# a) ubuntu 20.04 has too old parallel, and does not support --memsuspend
|
||||
# b) anyway for perf tests it should not be important (backward compatibility
|
||||
# with older ubuntu had been checked lots of times in various tests)
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# ARG for quick switch to a given ubuntu mirror
|
||||
ARG apt_archive="http://archive.ubuntu.com"
|
||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
ENV TZ=Europe/Amsterdam
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
ARG FROM_TAG=latest
|
||||
FROM clickhouse/test-base:$FROM_TAG
|
||||
|
||||
RUN apt-get update \
|
||||
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
|
||||
@ -56,10 +45,9 @@ COPY * /
|
||||
# node #0 should be less stable because of system interruptions. We bind
|
||||
# randomly to node 1 or 0 to gather some statistics on that. We have to bind
|
||||
# both servers and the tmpfs on which the database is stored. How to do it
|
||||
# through Yandex Sandbox API is unclear, but by default tmpfs uses
|
||||
# is unclear, but by default tmpfs uses
|
||||
# 'process allocation policy', not sure which process but hopefully the one that
|
||||
# writes to it, so just bind the downloader script as well. We could also try to
|
||||
# remount it with proper options in Sandbox task.
|
||||
# writes to it, so just bind the downloader script as well.
|
||||
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
|
||||
# Double-escaped backslashes are a tribute to the engineering wonder of docker --
|
||||
# it gives '/bin/sh: 1: [bash,: not found' otherwise.
|
||||
|
@ -90,7 +90,7 @@ function configure
|
||||
set +m
|
||||
|
||||
wait_for_server $LEFT_SERVER_PORT $left_pid
|
||||
echo Server for setup started
|
||||
echo "Server for setup started"
|
||||
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
|
||||
@ -156,9 +156,9 @@ function restart
|
||||
wait_for_server $RIGHT_SERVER_PORT $right_pid
|
||||
echo right ok
|
||||
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'"
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'"
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"
|
||||
|
||||
# Check again that both servers we started are running -- this is important
|
||||
@ -352,14 +352,12 @@ function get_profiles
|
||||
wait
|
||||
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
|
||||
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
|
||||
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
|
||||
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
|
||||
@ -665,9 +663,8 @@ create view partial_query_times as select * from
|
||||
-- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g.
|
||||
-- queries with new functions added in the tested PR).
|
||||
create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as select toDecimal64(time_median, 3) time,
|
||||
toDecimal64(time_stddev / time_median, 3) relative_time_stddev,
|
||||
as select round(time_median, 3) time,
|
||||
round(time_stddev / time_median, 3) relative_time_stddev,
|
||||
test, query_index, query_display_name
|
||||
from partial_query_times
|
||||
join query_display_names using (test, query_index)
|
||||
@ -739,28 +736,26 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
|
||||
;
|
||||
|
||||
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as with
|
||||
-- server_time is sometimes reported as zero (if it's less than 1 ms),
|
||||
-- so we have to work around this to not get an error about conversion
|
||||
-- of NaN to decimal.
|
||||
(left > right ? left / right : right / left) as times_change_float,
|
||||
isFinite(times_change_float) as times_change_finite,
|
||||
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
times_change_finite
|
||||
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
|
||||
: '--' as times_change_str
|
||||
select
|
||||
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
|
||||
toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
|
||||
round(left, 3), round(right, 3), times_change_str,
|
||||
round(diff, 3), round(stat_threshold, 3),
|
||||
changed_fail, test, query_index, query_display_name
|
||||
from queries where changed_show order by abs(diff) desc;
|
||||
|
||||
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as select
|
||||
toDecimal64(left, 3), toDecimal64(right, 3), toDecimal64(diff, 3),
|
||||
toDecimal64(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
|
||||
round(left, 3), round(right, 3), round(diff, 3),
|
||||
round(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
|
||||
from queries where unstable_show order by stat_threshold desc;
|
||||
|
||||
|
||||
@ -789,11 +784,10 @@ create view total_speedup as
|
||||
;
|
||||
|
||||
create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as with
|
||||
(times_speedup >= 1
|
||||
? '-' || toString(toDecimal64(times_speedup, 3)) || 'x'
|
||||
: '+' || toString(toDecimal64(1 / times_speedup, 3)) || 'x')
|
||||
? '-' || toString(round(times_speedup, 3)) || 'x'
|
||||
: '+' || toString(round(1 / times_speedup, 3)) || 'x')
|
||||
as times_speedup_str
|
||||
select test, times_speedup_str, queries, bad, changed, unstable
|
||||
-- Not sure what's the precedence of UNION ALL vs WHERE & ORDER BY, hence all
|
||||
@ -817,11 +811,10 @@ create view total_client_time_per_query as select *
|
||||
'test text, query_index int, client float, server float');
|
||||
|
||||
create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as select client, server, toDecimal64(client/server, 3) p,
|
||||
as select client, server, round(client/server, 3) p,
|
||||
test, query_display_name
|
||||
from total_client_time_per_query left join query_display_names using (test, query_index)
|
||||
where p > toDecimal64(1.02, 3) order by p desc;
|
||||
where p > round(1.02, 3) order by p desc;
|
||||
|
||||
create table wall_clock_time_per_test engine Memory as select *
|
||||
from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');
|
||||
@ -899,15 +892,14 @@ create view test_times_view_total as
|
||||
;
|
||||
|
||||
create table test_times_report engine File(TSV, 'report/test-times.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as select
|
||||
test,
|
||||
toDecimal64(real, 3),
|
||||
toDecimal64(total_client_time, 3),
|
||||
round(real, 3),
|
||||
round(total_client_time, 3),
|
||||
queries,
|
||||
toDecimal64(query_max, 3),
|
||||
toDecimal64(avg_real_per_query, 3),
|
||||
toDecimal64(query_min, 3),
|
||||
round(query_max, 3),
|
||||
round(avg_real_per_query, 3),
|
||||
round(query_min, 3),
|
||||
runs
|
||||
from (
|
||||
select * from test_times_view
|
||||
@ -919,21 +911,20 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv')
|
||||
|
||||
-- report for all queries page, only main metric
|
||||
create table all_tests_report engine File(TSV, 'report/all-queries.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as with
|
||||
-- server_time is sometimes reported as zero (if it's less than 1 ms),
|
||||
-- so we have to work around this to not get an error about conversion
|
||||
-- of NaN to decimal.
|
||||
(left > right ? left / right : right / left) as times_change_float,
|
||||
isFinite(times_change_float) as times_change_finite,
|
||||
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
|
||||
times_change_finite
|
||||
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
|
||||
: '--' as times_change_str
|
||||
select changed_fail, unstable_fail,
|
||||
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
|
||||
toDecimal64(isFinite(diff) ? diff : 0, 3),
|
||||
toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3),
|
||||
round(left, 3), round(right, 3), times_change_str,
|
||||
round(isFinite(diff) ? diff : 0, 3),
|
||||
round(isFinite(stat_threshold) ? stat_threshold : 0, 3),
|
||||
test, query_index, query_display_name
|
||||
from queries order by test, query_index;
|
||||
|
||||
@ -1044,27 +1035,6 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
|
||||
order by count() desc
|
||||
;
|
||||
|
||||
create table metric_devation engine File(TSVWithNamesAndTypes,
|
||||
'report/metric-deviation.$version.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
-- first goes the key used to split the file with grep
|
||||
as select test, query_index, query_display_name,
|
||||
toDecimal64(d, 3) d, q, metric
|
||||
from (
|
||||
select
|
||||
test, query_index,
|
||||
(q[3] - q[1])/q[2] d,
|
||||
quantilesExact(0, 0.5, 1)(value) q, metric
|
||||
from (select * from unstable_run_metrics
|
||||
union all select * from unstable_run_traces
|
||||
union all select * from unstable_run_metrics_2) mm
|
||||
group by test, query_index, metric
|
||||
having isFinite(d) and d > 0.5 and q[3] > 5
|
||||
) metrics
|
||||
left join query_display_names using (test, query_index)
|
||||
order by test, query_index, d desc
|
||||
;
|
||||
|
||||
create table stacks engine File(TSV, 'report/stacks.$version.tsv') as
|
||||
select
|
||||
-- first goes the key used to split the file with grep
|
||||
@ -1173,9 +1143,8 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
|
||||
|
||||
-- Show metrics that have changed
|
||||
create table changes engine File(TSV, 'metrics/changes.tsv')
|
||||
settings output_format_decimal_trailing_zeros = 1
|
||||
as select metric, left, right,
|
||||
toDecimal64(diff, 3), toDecimal64(times_diff, 3)
|
||||
round(diff, 3), round(times_diff, 3)
|
||||
from (
|
||||
select metric, median(left) as left, median(right) as right,
|
||||
(right - left) / left diff,
|
||||
@ -1226,7 +1195,6 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
|
||||
'$SHA_TO_TEST' :: LowCardinality(String) AS commit_sha,
|
||||
'${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME:-Performance}' :: LowCardinality(String) AS check_name,
|
||||
'$(sed -n 's/.*<!--status: \(.*\)-->/\1/p' report.html)' :: LowCardinality(String) AS check_status,
|
||||
-- TODO toDateTime() can't parse output of 'date', so no time for now.
|
||||
(($(date +%s) - $CHPC_CHECK_START_TIMESTAMP) * 1000) :: UInt64 AS check_duration_ms,
|
||||
fromUnixTimestamp($CHPC_CHECK_START_TIMESTAMP) check_start_time,
|
||||
test_name :: LowCardinality(String) AS test_name ,
|
||||
|
@ -19,31 +19,6 @@
|
||||
<opentelemetry_span_log remove="remove"/>
|
||||
<session_log remove="remove"/>
|
||||
|
||||
<!-- performance tests do not use real block devices,
|
||||
instead they store everything in memory.
|
||||
|
||||
And so, to avoid extra memory references, switch *_log to the Memory engine. -->
|
||||
<query_log>
|
||||
<engine>ENGINE = Memory</engine>
|
||||
<partition_by remove="remove"/>
|
||||
</query_log>
|
||||
<query_thread_log>
|
||||
<engine>ENGINE = Memory</engine>
|
||||
<partition_by remove="remove"/>
|
||||
</query_thread_log>
|
||||
<trace_log>
|
||||
<engine>ENGINE = Memory</engine>
|
||||
<partition_by remove="remove"/>
|
||||
</trace_log>
|
||||
<metric_log>
|
||||
<engine>ENGINE = Memory</engine>
|
||||
<partition_by remove="remove"/>
|
||||
</metric_log>
|
||||
<asynchronous_metric_log>
|
||||
<engine>ENGINE = Memory</engine>
|
||||
<partition_by remove="remove"/>
|
||||
</asynchronous_metric_log>
|
||||
|
||||
<uncompressed_cache_size>1000000000</uncompressed_cache_size>
|
||||
|
||||
<asynchronous_metrics_update_period_s>10</asynchronous_metrics_update_period_s>
|
||||
|
@ -21,6 +21,7 @@
|
||||
<!-- disable JIT for perf tests -->
|
||||
<compile_expressions>0</compile_expressions>
|
||||
<compile_aggregate_expressions>0</compile_aggregate_expressions>
|
||||
<compile_sort_description>0</compile_sort_description>
|
||||
|
||||
<!-- Don't fail some prewarm queries too early -->
|
||||
<timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed>
|
||||
|
@ -31,8 +31,6 @@ function download
|
||||
# Test all of them.
|
||||
declare -a urls_to_try=(
|
||||
"$S3_URL/PRs/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
|
||||
"$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
|
||||
"$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tgz"
|
||||
)
|
||||
|
||||
for path in "${urls_to_try[@]}"
|
||||
|
@ -130,7 +130,7 @@ then
|
||||
git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt
|
||||
fi
|
||||
|
||||
# Set python output encoding so that we can print queries with Russian letters.
|
||||
# Set python output encoding so that we can print queries with non-ASCII letters.
|
||||
export PYTHONIOENCODING=utf-8
|
||||
|
||||
# By default, use the main comparison script from the tested package, so that we
|
||||
@ -151,11 +151,7 @@ export PATH
|
||||
export REF_PR
|
||||
export REF_SHA
|
||||
|
||||
# Try to collect some core dumps. I've seen two patterns in Sandbox:
|
||||
# 1) |/home/zomb-sandbox/venv/bin/python /home/zomb-sandbox/client/sandbox/bin/coredumper.py %e %p %g %u %s %P %c
|
||||
# Not sure what this script does (puts them to sandbox resources, logs some messages?),
|
||||
# and it's not accessible from inside docker anyway.
|
||||
# 2) something like %e.%p.core.dmp. The dump should end up in the workspace directory.
|
||||
# Try to collect some core dumps.
|
||||
# At least we remove the ulimit and then try to pack some common file names into output.
|
||||
ulimit -c unlimited
|
||||
cat /proc/sys/kernel/core_pattern
|
||||
|
@ -1,4 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -exu
|
||||
trap "exit" INT TERM
|
||||
|
||||
|
30
docker/test/sqltest/Dockerfile
Normal file
30
docker/test/sqltest/Dockerfile
Normal file
@ -0,0 +1,30 @@
|
||||
# docker build -t clickhouse/sqltest .
|
||||
ARG FROM_TAG=latest
|
||||
FROM clickhouse/test-base:$FROM_TAG
|
||||
|
||||
RUN apt-get update --yes \
|
||||
&& env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get install --yes --no-install-recommends \
|
||||
wget \
|
||||
git \
|
||||
python3 \
|
||||
python3-dev \
|
||||
python3-pip \
|
||||
sudo \
|
||||
&& apt-get clean
|
||||
|
||||
RUN pip3 install \
|
||||
pyyaml \
|
||||
clickhouse-driver
|
||||
|
||||
ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
|
||||
|
||||
RUN git clone ${sqltest_repo}
|
||||
|
||||
ENV TZ=UTC
|
||||
ENV MAX_RUN_TIME=900
|
||||
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
|
||||
|
||||
COPY run.sh /
|
||||
COPY test.py /
|
||||
CMD ["/bin/bash", "/run.sh"]
|
51
docker/test/sqltest/run.sh
Executable file
51
docker/test/sqltest/run.sh
Executable file
@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
# shellcheck disable=SC2015
|
||||
|
||||
set -x
|
||||
set -e
|
||||
set -u
|
||||
set -o pipefail
|
||||
|
||||
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-16_debug_none_unsplitted_disable_False_binary"}
|
||||
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
|
||||
|
||||
function wget_with_retry
|
||||
{
|
||||
for _ in 1 2 3 4; do
|
||||
if wget -nv -nd -c "$1";then
|
||||
return 0
|
||||
else
|
||||
sleep 0.5
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
|
||||
chmod +x clickhouse
|
||||
./clickhouse install --noninteractive
|
||||
|
||||
echo "
|
||||
users:
|
||||
default:
|
||||
access_management: 1" > /etc/clickhouse-server/users.d/access_management.yaml
|
||||
|
||||
clickhouse start
|
||||
|
||||
# Wait for start
|
||||
for _ in {1..100}
|
||||
do
|
||||
clickhouse-client --query "SELECT 1" && break ||:
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Run the test
|
||||
pushd sqltest/standards/2016/
|
||||
/test.py
|
||||
mv report.html test.log /workspace
|
||||
popd
|
||||
|
||||
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.log
|
||||
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.err.log
|
||||
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log.zst /var/log/clickhouse-server/clickhouse-server.err.log.zst /workspace
|
148
docker/test/sqltest/test.py
Executable file
148
docker/test/sqltest/test.py
Executable file
@ -0,0 +1,148 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import yaml
|
||||
import html
|
||||
import random
|
||||
import string
|
||||
from clickhouse_driver import Client
|
||||
|
||||
|
||||
client = Client(host="localhost", port=9000)
|
||||
settings = {
|
||||
"default_table_engine": "Memory",
|
||||
"union_default_mode": "DISTINCT",
|
||||
"calculate_text_stack_trace": 0,
|
||||
}
|
||||
|
||||
database_name = "sqltest_" + "".join(
|
||||
random.choice(string.ascii_lowercase) for _ in range(10)
|
||||
)
|
||||
|
||||
client.execute(f"DROP DATABASE IF EXISTS {database_name}", settings=settings)
|
||||
client.execute(f"CREATE DATABASE {database_name}", settings=settings)
|
||||
|
||||
client = Client(host="localhost", port=9000, database=database_name)
|
||||
|
||||
summary = {"success": 0, "total": 0, "results": {}}
|
||||
|
||||
log_file = open("test.log", "w")
|
||||
report_html_file = open("report.html", "w")
|
||||
|
||||
with open("features.yml", "r") as file:
|
||||
yaml_content = yaml.safe_load(file)
|
||||
|
||||
for category in yaml_content:
|
||||
log_file.write(category.capitalize() + " features:\n")
|
||||
summary["results"][category] = {"success": 0, "total": 0, "results": {}}
|
||||
|
||||
for test in yaml_content[category]:
|
||||
log_file.write(test + ": " + yaml_content[category][test] + "\n")
|
||||
summary["results"][category]["results"][test] = {
|
||||
"success": 0,
|
||||
"total": 0,
|
||||
"description": yaml_content[category][test],
|
||||
}
|
||||
|
||||
test_path = test[0] + "/" + test + ".tests.yml"
|
||||
if os.path.exists(test_path):
|
||||
with open(test_path, "r") as test_file:
|
||||
test_yaml_content = yaml.load_all(test_file, Loader=yaml.FullLoader)
|
||||
|
||||
for test_case in test_yaml_content:
|
||||
queries = test_case["sql"]
|
||||
if not isinstance(queries, list):
|
||||
queries = [queries]
|
||||
|
||||
for query in queries:
|
||||
# Example: E011-01
|
||||
test_group = ""
|
||||
if "-" in test:
|
||||
test_group = test.split("-", 1)[0]
|
||||
summary["results"][category]["results"][test_group][
|
||||
"total"
|
||||
] += 1
|
||||
summary["results"][category]["results"][test]["total"] += 1
|
||||
summary["results"][category]["total"] += 1
|
||||
summary["total"] += 1
|
||||
|
||||
log_file.write(query + "\n")
|
||||
|
||||
try:
|
||||
result = client.execute(query, settings=settings)
|
||||
log_file.write(str(result) + "\n")
|
||||
|
||||
if test_group:
|
||||
summary["results"][category]["results"][test_group][
|
||||
"success"
|
||||
] += 1
|
||||
summary["results"][category]["results"][test][
|
||||
"success"
|
||||
] += 1
|
||||
summary["results"][category]["success"] += 1
|
||||
summary["success"] += 1
|
||||
|
||||
except Exception as e:
|
||||
log_file.write(f"Error occurred: {str(e)}\n")
|
||||
|
||||
client.execute(f"DROP DATABASE {database_name}", settings=settings)
|
||||
|
||||
|
||||
def enable_color(ratio):
|
||||
if ratio == 0:
|
||||
return "<b style='color: red;'>"
|
||||
elif ratio < 0.5:
|
||||
return "<b style='color: orange;'>"
|
||||
elif ratio < 1:
|
||||
return "<b style='color: gray;'>"
|
||||
else:
|
||||
return "<b style='color: green;'>"
|
||||
|
||||
|
||||
reset_color = "</b>"
|
||||
|
||||
|
||||
def print_ratio(indent, name, success, total, description):
|
||||
report_html_file.write(
|
||||
"{}{}: {}{} / {} ({:.1%}){}{}\n".format(
|
||||
" " * indent,
|
||||
name.capitalize(),
|
||||
enable_color(success / total),
|
||||
success,
|
||||
total,
|
||||
success / total,
|
||||
reset_color,
|
||||
f" - " + html.escape(description) if description else "",
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
report_html_file.write(
|
||||
"<html><body><pre style='font-size: 16pt; padding: 1em; line-height: 1.25;'>\n"
|
||||
)
|
||||
|
||||
print_ratio(0, "Total", summary["success"], summary["total"], "")
|
||||
|
||||
for category in summary["results"]:
|
||||
cat_summary = summary["results"][category]
|
||||
|
||||
if cat_summary["total"] == 0:
|
||||
continue
|
||||
|
||||
print_ratio(2, category, cat_summary["success"], cat_summary["total"], "")
|
||||
|
||||
for test in summary["results"][category]["results"]:
|
||||
test_summary = summary["results"][category]["results"][test]
|
||||
|
||||
if test_summary["total"] == 0:
|
||||
continue
|
||||
|
||||
print_ratio(
|
||||
6 if "-" in test else 4,
|
||||
test,
|
||||
test_summary["success"],
|
||||
test_summary["total"],
|
||||
test_summary["description"],
|
||||
)
|
||||
|
||||
report_html_file.write("</pre></body></html>\n")
|
@ -20,6 +20,22 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
./setup_minio.sh stateful
|
||||
|
||||
# Set up a cluster for logs export to ClickHouse Cloud
|
||||
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
echo "
|
||||
remote_servers:
|
||||
system_logs_export:
|
||||
shard:
|
||||
replica:
|
||||
secure: 1
|
||||
user: ci
|
||||
host: '${CLICKHOUSE_CI_LOGS_HOST}'
|
||||
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
|
||||
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
|
||||
fi
|
||||
|
||||
function start()
|
||||
{
|
||||
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
|
||||
@ -65,6 +81,22 @@ function start()
|
||||
}
|
||||
|
||||
start
|
||||
|
||||
# Initialize export of system logs to ClickHouse Cloud
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
|
||||
# TODO: Check if the password will appear in the logs.
|
||||
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
|
||||
|
||||
./setup_export_logs.sh
|
||||
|
||||
# Unset variables after use
|
||||
export CONNECTION_PARAMETERS=''
|
||||
export CLICKHOUSE_CI_LOGS_HOST=''
|
||||
export CLICKHOUSE_CI_LOGS_PASSWORD=''
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
|
@ -87,4 +87,5 @@ RUN npm install -g azurite \
|
||||
COPY run.sh /
|
||||
COPY setup_minio.sh /
|
||||
COPY setup_hdfs_minicluster.sh /
|
||||
|
||||
CMD ["/bin/bash", "/run.sh"]
|
||||
|
@ -36,6 +36,22 @@ fi
|
||||
./setup_minio.sh stateless
|
||||
./setup_hdfs_minicluster.sh
|
||||
|
||||
# Set up a cluster for logs export to ClickHouse Cloud
|
||||
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
echo "
|
||||
remote_servers:
|
||||
system_logs_export:
|
||||
shard:
|
||||
replica:
|
||||
secure: 1
|
||||
user: ci
|
||||
host: '${CLICKHOUSE_CI_LOGS_HOST}'
|
||||
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
|
||||
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
|
||||
fi
|
||||
|
||||
# For flaky check we also enable thread fuzzer
|
||||
if [ "$NUM_TRIES" -gt "1" ]; then
|
||||
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
|
||||
@ -92,7 +108,28 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
|
||||
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
|
||||
# Wait for the server to start, but not for too long.
|
||||
for _ in {1..100}
|
||||
do
|
||||
clickhouse-client --query "SELECT 1" && break
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Initialize export of system logs to ClickHouse Cloud
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
|
||||
# TODO: Check if the password will appear in the logs.
|
||||
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
|
||||
|
||||
./setup_export_logs.sh
|
||||
|
||||
# Unset variables after use
|
||||
export CONNECTION_PARAMETERS=''
|
||||
export CLICKHOUSE_CI_LOGS_HOST=''
|
||||
export CLICKHOUSE_CI_LOGS_PASSWORD=''
|
||||
fi
|
||||
|
||||
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
|
||||
|
||||
|
@ -51,8 +51,39 @@ configure
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
./setup_minio.sh stateless # to have a proper environment
|
||||
|
||||
# Set up a cluster for logs export to ClickHouse Cloud
|
||||
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
echo "
|
||||
remote_servers:
|
||||
system_logs_export:
|
||||
shard:
|
||||
replica:
|
||||
secure: 1
|
||||
user: ci
|
||||
host: '${CLICKHOUSE_CI_LOGS_HOST}'
|
||||
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
|
||||
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
|
||||
fi
|
||||
|
||||
start
|
||||
|
||||
# Initialize export of system logs to ClickHouse Cloud
|
||||
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
|
||||
then
|
||||
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
|
||||
# TODO: Check if the password will appear in the logs.
|
||||
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
|
||||
|
||||
./setup_export_logs.sh
|
||||
|
||||
# Unset variables after use
|
||||
export CONNECTION_PARAMETERS=''
|
||||
export CLICKHOUSE_CI_LOGS_HOST=''
|
||||
export CLICKHOUSE_CI_LOGS_PASSWORD=''
|
||||
fi
|
||||
|
||||
# shellcheck disable=SC2086 # No quotes because I want to split it into words.
|
||||
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
|
||||
chmod 777 -R /var/lib/clickhouse
|
||||
@ -180,6 +211,11 @@ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/cli
|
||||
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
|
||||
|
||||
sudo cat /etc/clickhouse-server/config.d/logger_trace.xml \
|
||||
| sed "s|<level>trace</level>|<level>test</level>|" \
|
||||
> /etc/clickhouse-server/config.d/logger_trace.xml.tmp
|
||||
mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml
|
||||
|
||||
start
|
||||
|
||||
stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
|
||||
|
@ -1,5 +1,5 @@
|
||||
# docker build -t clickhouse/style-test .
|
||||
FROM ubuntu:20.04
|
||||
FROM ubuntu:22.04
|
||||
ARG ACT_VERSION=0.2.33
|
||||
ARG ACTIONLINT_VERSION=1.6.22
|
||||
|
||||
|
@ -36,6 +36,9 @@ then
|
||||
elif [ "${ARCH}" = "riscv64" ]
|
||||
then
|
||||
DIR="riscv64"
|
||||
elif [ "${ARCH}" = "s390x" ]
|
||||
then
|
||||
DIR="s390x"
|
||||
fi
|
||||
elif [ "${OS}" = "FreeBSD" ]
|
||||
then
|
||||
|
45
docs/changelogs/v23.3.9.55-lts.md
Normal file
45
docs/changelogs/v23.3.9.55-lts.md
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.3.9.55-lts (b9c5c8622d3) FIXME as compared to v23.3.8.21-lts (1675f2264f3)
|
||||
|
||||
#### Performance Improvement
|
||||
* Backported in [#52213](https://github.com/ClickHouse/ClickHouse/issues/52213): Do not store blocks in `ANY` hash join if nothing is inserted. [#48633](https://github.com/ClickHouse/ClickHouse/pull/48633) ([vdimir](https://github.com/vdimir)).
|
||||
* Backported in [#52826](https://github.com/ClickHouse/ClickHouse/issues/52826): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1`. This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823). This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173). [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* Backported in [#53019](https://github.com/ClickHouse/ClickHouse/issues/53019): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Backported in [#53288](https://github.com/ClickHouse/ClickHouse/issues/53288): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud; the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Backported in [#53461](https://github.com/ClickHouse/ClickHouse/issues/53461): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Fix optimization to move functions before sorting. [#51481](https://github.com/ClickHouse/ClickHouse/pull/51481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Init and destroy ares channel on demand. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
|
17
docs/changelogs/v23.7.4.5-stable.md
Normal file
17
docs/changelogs/v23.7.4.5-stable.md
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
sidebar_position: 1
|
||||
sidebar_label: 2023
|
||||
---
|
||||
|
||||
# 2023 Changelog
|
||||
|
||||
### ClickHouse release v23.7.4.5-stable (bd2fcd44553) FIXME as compared to v23.7.3.14-stable (bd9a510550c)
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
|
@ -42,20 +42,20 @@ sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk lsb-rel
|
||||
|
||||
### Install and Use the Clang compiler
|
||||
|
||||
On Ubuntu/Debian you can use LLVM's automatic installation script, see [here](https://apt.llvm.org/).
|
||||
On Ubuntu/Debian, you can use LLVM's automatic installation script; see [here](https://apt.llvm.org/).
|
||||
|
||||
``` bash
|
||||
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
```
|
||||
|
||||
Note: in case of troubles, you can also use this:
|
||||
Note: in case of trouble, you can also use this:
|
||||
|
||||
```bash
|
||||
sudo apt-get install software-properties-common
|
||||
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
|
||||
```
|
||||
|
||||
For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).
|
||||
For other Linux distributions, check the availability of LLVM's [prebuilt packages](https://releases.llvm.org/download.html).
|
||||
|
||||
As of April 2023, clang-16 or higher will work.
|
||||
GCC as a compiler is not supported.
|
||||
@ -92,8 +92,12 @@ cmake -S . -B build
|
||||
cmake --build build # or: `cd build; ninja`
|
||||
```
|
||||
|
||||
:::tip
|
||||
In case `cmake` isn't able to detect the number of available logical cores, the build will be done with a single thread. To overcome this, you can tweak `cmake` to use a specific number of threads with the `-j` flag, for example, `cmake --build build -j 16`. Alternatively, you can generate the build files with a specific number of jobs in advance to avoid always setting the flag: `cmake -DPARALLEL_COMPILE_JOBS=16 -S . -B build`, where `16` is the desired number of threads.
|
||||
:::
|
||||
|
||||
To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`).
|
||||
This will create executable `build/programs/clickhouse` which can be used with `client` or `server` arguments.
|
||||
This will create an executable `build/programs/clickhouse`, which can be used with `client` or `server` arguments.
|
||||
|
||||
## Building on Any Linux {#how-to-build-clickhouse-on-any-linux}
|
||||
|
||||
@ -107,7 +111,7 @@ The build requires the following components:
|
||||
- Yasm
|
||||
- Gawk
|
||||
|
||||
If all the components are installed, you may build in the same way as the steps above.
|
||||
If all the components are installed, you may build it in the same way as the steps above.
|
||||
|
||||
Example for OpenSUSE Tumbleweed:
|
||||
|
||||
@ -123,7 +127,7 @@ Example for Fedora Rawhide:
|
||||
|
||||
``` bash
|
||||
sudo yum update
|
||||
sudo yum --nogpg install git cmake make clang python3 ccache nasm yasm gawk
|
||||
sudo yum --nogpg install git cmake make clang python3 ccache lld nasm yasm gawk
|
||||
git clone --recursive https://github.com/ClickHouse/ClickHouse.git
|
||||
mkdir build
|
||||
cmake -S . -B build
|
||||
|
@ -190,7 +190,7 @@ These are the schema conversion manipulations you can do with table overrides fo
|
||||
* Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
|
||||
* Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs).
|
||||
* Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias).
|
||||
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes)
|
||||
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes). Note that you need to enable the `use_skip_indexes_if_final` setting to make them work (MaterializedMySQL uses `SELECT ... FINAL` by default).
|
||||
* Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are
|
||||
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
|
||||
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)
|
||||
|
@ -21,7 +21,7 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)
|
||||
|
||||
- `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key)
|
||||
- `container_name` - Container name
|
||||
- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
|
||||
- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
|
||||
- `account_name` - if storage_account_url is used, then account name can be specified here
|
||||
- `account_key` - if storage_account_url is used, then account key can be specified here
|
||||
- `format` — The [format](/docs/en/interfaces/formats.md) of the file (see the example after this list).
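A minimal sketch assuming the parameter order listed above; the connection string, container, and blob path are placeholders, not a verified signature:

```sql
CREATE TABLE azure_blob_storage_example (name String, value UInt32)
    ENGINE = AzureBlobStorage('DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net',
                              'my-container', 'data/*.csv', 'CSV');
```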
|
||||
|
@ -173,6 +173,7 @@ Similar to GraphiteMergeTree, the Kafka engine supports extended configuration u
|
||||
<!-- Global configuration options for all tables of Kafka engine type -->
|
||||
<debug>cgrp</debug>
|
||||
<auto_offset_reset>smallest</auto_offset_reset>
|
||||
<statistics_interval_ms>600</statistics_interval_ms>
|
||||
|
||||
<!-- Configuration specific to topics "logs" and "stats" -->
|
||||
|
||||
@ -260,3 +261,4 @@ The number of rows in one Kafka message depends on whether the format is row-bas
|
||||
|
||||
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
|
||||
- [background_message_broker_schedule_pool_size](../../../operations/server-configuration-parameters/settings.md#background_message_broker_schedule_pool_size)
|
||||
- [system.kafka_consumers](../../../operations/system-tables/kafka_consumers.md)
|
||||
|
@ -13,7 +13,7 @@ If more than one table is required, it is highly recommended to use the [Materia
|
||||
|
||||
``` sql
|
||||
CREATE TABLE postgresql_db.postgresql_replica (key UInt64, value UInt64)
|
||||
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_replica', 'postgres_user', 'postgres_password')
|
||||
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_table', 'postgres_user', 'postgres_password')
|
||||
PRIMARY KEY key;
|
||||
```
|
||||
|
||||
|
@ -37,7 +37,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
|
||||
### Engine parameters
|
||||
|
||||
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
|
||||
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
|
||||
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
|
||||
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
|
||||
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
|
||||
@ -164,6 +164,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
|
||||
The `path` argument can specify multiple files using bash-like wildcards. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time); see the example after the wildcard list below.
|
||||
|
||||
- `*` — Substitutes any number of any characters except `/` including empty string.
|
||||
- `**` — Substitutes any number of any characters including `/`, including empty string.
|
||||
- `?` — Substitutes any single character.
|
||||
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
|
||||
- `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
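A hypothetical illustration of these wildcards (the bucket, prefix, and file layout are made up):

```sql
-- Matches e.g. data/2022/01/part-007.csv and data/2023/archive/part-042.csv,
-- but nothing outside the data/ prefix:
CREATE TABLE s3_wildcard_example (name String, value UInt32)
    ENGINE = S3('https://my-bucket.s3.amazonaws.com/data/{2022..2023}/**/part-{000..099}.csv', 'CSV');
```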
|
||||
|
@ -27,7 +27,7 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
|
||||
|
||||
**Engine parameters**
|
||||
|
||||
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
|
||||
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
|
||||
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
|
||||
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
|
||||
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
|
||||
@ -213,6 +213,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
|
||||
The `path` argument can specify multiple files using bash-like wildcards. To be processed, a file must exist and match the whole path pattern. The listing of files is determined during `SELECT` (not at `CREATE` time).
|
||||
|
||||
- `*` — Substitutes any number of any characters except `/` including empty string.
|
||||
- `**` — Substitutes any number of any characters including `/`, including empty string.
|
||||
- `?` — Substitutes any single character.
|
||||
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
|
||||
- `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
|
||||
|
@ -1,4 +1,4 @@
|
||||
# Approximate Nearest Neighbor Search Indexes [experimental] {#table_engines-ANNIndex}
|
||||
# Approximate Nearest Neighbor Search Indexes [experimental]
|
||||
|
||||
Nearest neighborhood search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most
|
||||
straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the
|
||||
@ -17,7 +17,7 @@ In terms of SQL, the nearest neighborhood problem can be expressed as follows:
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM table
|
||||
FROM table_with_ann_index
|
||||
ORDER BY Distance(vectors, Point)
|
||||
LIMIT N
|
||||
```
|
||||
@ -32,7 +32,7 @@ An alternative formulation of the nearest neighborhood search problem looks as f
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM table
|
||||
FROM table_with_ann_index
|
||||
WHERE Distance(vectors, Point) < MaxDistance
|
||||
LIMIT N
|
||||
```
|
||||
@ -45,12 +45,12 @@ With brute force search, both queries are expensive (linear in the number of poi
|
||||
`Point` must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation
|
||||
of the search space (using clustering, search trees, etc.) which allows computing an approximate answer much more quickly (in sub-linear time).
|
||||
|
||||
# Creating and Using ANN Indexes
|
||||
# Creating and Using ANN Indexes {#creating_using_ann_indexes}
|
||||
|
||||
Syntax to create an ANN index over an [Array](../../../sql-reference/data-types/array.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table
|
||||
CREATE TABLE table_with_ann_index
|
||||
(
|
||||
`id` Int64,
|
||||
`vectors` Array(Float32),
|
||||
@ -63,7 +63,7 @@ ORDER BY id;
|
||||
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table
|
||||
CREATE TABLE table_with_ann_index
|
||||
(
|
||||
`id` Int64,
|
||||
`vectors` Tuple(Float32[, Float32[, ...]]),
|
||||
@ -83,7 +83,7 @@ ANN indexes support two types of queries:
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM table
|
||||
FROM table_with_ann_index
|
||||
[WHERE ...]
|
||||
ORDER BY Distance(vectors, Point)
|
||||
LIMIT N
|
||||
@ -93,7 +93,7 @@ ANN indexes support two types of queries:
|
||||
|
||||
``` sql
|
||||
SELECT *
|
||||
FROM table
|
||||
FROM table_with_ann_index
|
||||
WHERE Distance(vectors, Point) < MaxDistance
|
||||
LIMIT N
|
||||
```
|
||||
@ -103,7 +103,7 @@ To avoid writing out large vectors, you can use [query
|
||||
parameters](/docs/en/interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g.
|
||||
|
||||
```bash
|
||||
clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
|
||||
clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_index WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
|
||||
```
|
||||
:::
|
||||
|
||||
@ -138,17 +138,19 @@ back to a smaller `GRANULARITY` values only in case of problems like excessive m
|
||||
was specified for ANN indexes, the default value is 100 million.
|
||||
|
||||
|
||||
# Available ANN Indexes
|
||||
# Available ANN Indexes {#available_ann_indexes}
|
||||
|
||||
- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy)
|
||||
|
||||
- [USearch](/docs/en/engines/table-engines/mergetree-family/annindexes.md#usearch-usearch)
|
||||
|
||||
## Annoy {#annoy}
|
||||
|
||||
Annoy indexes are currently experimental; to use them, you first need to `SET allow_experimental_annoy_index = 1`. They are also currently
|
||||
disabled on ARM due to memory safety problems with the algorithm.
|
||||
|
||||
This type of ANN index implements [the Annoy algorithm](https://github.com/spotify/annoy) which is based on a recursive division of the
|
||||
space in random linear surfaces (lines in 2D, planes in 3D etc.).
|
||||
This type of ANN index is based on the [Annoy library](https://github.com/spotify/annoy) which recursively divides the space into random
|
||||
linear surfaces (lines in 2D, planes in 3D etc.).
|
||||
|
||||
<div class='vimeo-container'>
|
||||
<iframe src="//www.youtube.com/embed/QkCCyLW0ehU"
|
||||
@ -165,7 +167,7 @@ space in random linear surfaces (lines in 2D, planes in 3D etc.).
|
||||
Syntax to create an Annoy index over an [Array](../../../sql-reference/data-types/array.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table
|
||||
CREATE TABLE table_with_annoy_index
|
||||
(
|
||||
id Int64,
|
||||
vectors Array(Float32),
|
||||
@ -178,7 +180,7 @@ ORDER BY id;
|
||||
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table
|
||||
CREATE TABLE table_with_annoy_index
|
||||
(
|
||||
id Int64,
|
||||
vectors Tuple(Float32[, Float32[, ...]]),
|
||||
@ -188,23 +190,17 @@ ENGINE = MergeTree
|
||||
ORDER BY id;
|
||||
```
|
||||
|
||||
Annoy currently supports `L2Distance` and `cosineDistance` as distance function `Distance`. If no distance function was specified during
|
||||
index creation, `L2Distance` is used as default. Parameter `NumTrees` is the number of trees which the algorithm creates (default if not
|
||||
specified: 100). Higher values of `NumTree` mean more accurate search results but slower index creation / query times (approximately
|
||||
linearly) as well as larger index sizes.
|
||||
Annoy currently supports two distance functions:
|
||||
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
|
||||
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
|
||||
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
|
||||
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).
|
||||
|
||||
`L2Distance` is also called Euclidean distance; the Euclidean distance between two points in Euclidean space is the length of the line segment between them.
|
||||
For example, if we have points P(p1,p2) and Q(q1,q2), their distance is d(p,q) = √((p1 - q1)² + (p2 - q2)²).
|
||||
![L2Distance](https://en.wikipedia.org/wiki/Euclidean_distance#/media/File:Euclidean_distance_2d.svg)
|
||||
For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
|
||||
distance function was specified during index creation, `L2Distance` is used as default.
|
||||
|
||||
`cosineDistance`, also called cosine similarity, is a measure of similarity between two non-zero vectors defined in an inner product space. Cosine similarity is the cosine of the angle between the vectors; that is, it is the dot product of the vectors divided by the product of their lengths.
|
||||
![cosineDistance](https://www.tyrrell4innovation.ca/wp-content/uploads/2021/06/rsz_jenny_du_miword.png)
|
||||
|
||||
The Euclidean distance corresponds to the L2-norm of a difference between vectors. The cosine similarity is proportional to the dot product of two vectors and inversely proportional to the product of their magnitudes.
|
||||
![compare](https://www.researchgate.net/publication/320914786/figure/fig2/AS:558221849841664@1510101868614/The-difference-between-Euclidean-distance-and-cosine-similarity.png)
|
||||
In one sentence: cosine similarity cares only about the angle between the vectors, not about the "distance" as we normally think of it.
|
||||
![L2 distance](https://www.baeldung.com/wp-content/uploads/sites/4/2020/06/4-1.png)
|
||||
![cosineDistance](https://www.baeldung.com/wp-content/uploads/sites/4/2020/06/5.png)
|
||||
Parameter `NumTrees` is the number of trees which the algorithm creates (default if not specified: 100). Higher values of `NumTree` mean
|
||||
more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes.
|
||||
|
||||
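As a sketch, an index that overrides both defaults described above (the table name and parameter values are illustrative, not recommendations):

```sql
CREATE TABLE table_with_tuned_annoy_index
(
    id Int64,
    vectors Array(Float32),
    -- cosineDistance instead of the default L2Distance, 200 trees instead of the default 100
    INDEX annoy_idx vectors TYPE annoy('cosineDistance', 200)
)
ENGINE = MergeTree
ORDER BY id;
```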
:::note
|
||||
Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays **must** have same length. Use
|
||||
@ -222,3 +218,60 @@ ORDER BY L2Distance(vectors, Point)
|
||||
LIMIT N
|
||||
SETTINGS annoy_index_search_k_nodes=100;
|
||||
```
|
||||
|
||||
## USearch {#usearch}
|
||||
|
||||
This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW
|
||||
algorithm](https://arxiv.org/abs/1603.09320), i.e., builds a hierarchical graph where each point represents a vector and the edges represent
|
||||
similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the
|
||||
overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors,
|
||||
that are expensive to load and compare. The library also has several hardware-specific SIMD optimizations to further accelerate
|
||||
distance computations on modern Arm (NEON and SVE) and x86 (AVX2 and AVX-512) CPUs and OS-specific optimizations to allow efficient
|
||||
navigation around immutable persistent files, without loading them into RAM.
|
||||
|
||||
<div class='vimeo-container'>
|
||||
<iframe src="//www.youtube.com/embed/UMrhB3icP9w"
|
||||
width="640"
|
||||
height="360"
|
||||
frameborder="0"
|
||||
allow="autoplay;
|
||||
fullscreen;
|
||||
picture-in-picture"
|
||||
allowfullscreen>
|
||||
</iframe>
|
||||
</div>
|
||||
|
||||
Syntax to create a USearch index over an [Array](../../../sql-reference/data-types/array.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table_with_usearch_index
|
||||
(
|
||||
id Int64,
|
||||
vectors Array(Float32),
|
||||
INDEX [ann_index_name] vectors TYPE usearch([Distance]) [GRANULARITY N]
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY id;
|
||||
```
|
||||
|
||||
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:
|
||||
|
||||
```sql
|
||||
CREATE TABLE table_with_usearch_index
|
||||
(
|
||||
id Int64,
|
||||
vectors Tuple(Float32[, Float32[, ...]]),
|
||||
INDEX [ann_index_name] vectors TYPE usearch([Distance]) [GRANULARITY N]
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
ORDER BY id;
|
||||
```
|
||||
|
||||
USearch currently supports two distance functions:
|
||||
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
|
||||
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
|
||||
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
|
||||
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).
|
||||
|
||||
For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
|
||||
distance function was specified during index creation, `L2Distance` is used as default.
|
||||
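For example, a minimal sketch of a USearch index that selects `cosineDistance` explicitly (the table name is illustrative):

```sql
CREATE TABLE table_with_cosine_usearch_index
(
    id Int64,
    vectors Array(Float32),
    -- override the default L2Distance
    INDEX usearch_idx vectors TYPE usearch('cosineDistance')
)
ENGINE = MergeTree
ORDER BY id;
```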
|
@ -323,9 +323,9 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25
|
||||
`clickhouse-client` uses the first existing file of the following:
|
||||
|
||||
- Defined in the `--config-file` parameter.
|
||||
- `./clickhouse-client.xml`
|
||||
- `~/.clickhouse-client/config.xml`
|
||||
- `/etc/clickhouse-client/config.xml`
|
||||
- `./clickhouse-client.xml`, `.yaml`, `.yml`
|
||||
- `~/.clickhouse-client/config.xml`, `.yaml`, `.yml`
|
||||
- `/etc/clickhouse-client/config.xml`, `.yaml`, `.yml`
|
||||
|
||||
Example of a config file:
|
||||
|
||||
@ -342,6 +342,17 @@ Example of a config file:
|
||||
</config>
|
||||
```
|
||||
|
||||
Or the same config in a YAML format:
|
||||
|
||||
```yaml
|
||||
user: username
|
||||
password: 'password'
|
||||
secure: true
|
||||
openSSL:
|
||||
client:
|
||||
caConfig: '/etc/ssl/cert.pem'
|
||||
```
|
||||
|
||||
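A config file stored in one of the locations above is picked up automatically; to use a file elsewhere, it can be passed explicitly via `--config-file`. A sketch (the path is hypothetical):

```bash
clickhouse-client --config-file=/path/to/my-clickhouse-client.yaml --query "SELECT 1"
```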
### Query ID Format {#query-id-format}
|
||||
|
||||
In interactive mode `clickhouse-client` shows query ID for every query. By default, the ID is formatted like this:
|
||||
|
@ -11,82 +11,83 @@ results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
|
||||
The supported formats are:
|
||||
|
||||
| Format | Input | Output |
|
||||
|-------------------------------------------------------------------------------------------|------|--------|
|
||||
| [TabSeparated](#tabseparated) | ✔ | ✔ |
|
||||
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
|
||||
| [Template](#format-template) | ✔ | ✔ |
|
||||
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
|
||||
| [CSV](#csv) | ✔ | ✔ |
|
||||
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
|
||||
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
|
||||
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
|
||||
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
|
||||
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [SQLInsert](#sqlinsert) | ✗ | ✔ |
|
||||
| [Values](#data-format-values) | ✔ | ✔ |
|
||||
| [Vertical](#vertical) | ✗ | ✔ |
|
||||
| [JSON](#json) | ✔ | ✔ |
|
||||
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
|
||||
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
|
||||
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
|
||||
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)                                           | ✔     | ✔      |
|
||||
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
|
||||
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
|
||||
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
|
||||
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
|
||||
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
|
||||
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
|
||||
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
|
||||
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
|
||||
| [TSKV](#tskv) | ✔ | ✔ |
|
||||
| [Pretty](#pretty) | ✗ | ✔ |
|
||||
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
|
||||
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
|
||||
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
|
||||
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
|
||||
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
|
||||
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
|
||||
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
|
||||
| [PrettySpace](#prettyspace) | ✗ | ✔ |
|
||||
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
|
||||
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
|
||||
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
|
||||
| [Prometheus](#prometheus) | ✗ | ✔ |
|
||||
| [Protobuf](#protobuf) | ✔ | ✔ |
|
||||
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
|
||||
| [Avro](#data-format-avro) | ✔ | ✔ |
|
||||
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
|
||||
| [Parquet](#data-format-parquet) | ✔ | ✔ |
|
||||
| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ |
|
||||
| [Arrow](#data-format-arrow) | ✔ | ✔ |
|
||||
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
|
||||
| [ORC](#data-format-orc) | ✔ | ✔ |
|
||||
| [RowBinary](#rowbinary) | ✔ | ✔ |
|
||||
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
|
||||
| [Native](#native) | ✔ | ✔ |
|
||||
| [Null](#null) | ✗ | ✔ |
|
||||
| [XML](#xml) | ✗ | ✔ |
|
||||
| [CapnProto](#capnproto) | ✔ | ✔ |
|
||||
| [LineAsString](#lineasstring) | ✔ | ✔ |
|
||||
| [Regexp](#data-format-regexp) | ✔ | ✗ |
|
||||
| [RawBLOB](#rawblob) | ✔ | ✔ |
|
||||
| [MsgPack](#msgpack) | ✔ | ✔ |
|
||||
| [MySQLDump](#mysqldump) | ✔ | ✗ |
|
||||
| [Markdown](#markdown) | ✗ | ✔ |
|
||||
|-------------------------------------------------------------------------------------------|------|-------|
|
||||
| [TabSeparated](#tabseparated) | ✔ | ✔ |
|
||||
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
|
||||
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
|
||||
| [Template](#format-template) | ✔ | ✔ |
|
||||
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
|
||||
| [CSV](#csv) | ✔ | ✔ |
|
||||
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
|
||||
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
|
||||
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
|
||||
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
|
||||
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
|
||||
| [SQLInsert](#sqlinsert) | ✗ | ✔ |
|
||||
| [Values](#data-format-values) | ✔ | ✔ |
|
||||
| [Vertical](#vertical) | ✗ | ✔ |
|
||||
| [JSON](#json) | ✔ | ✔ |
|
||||
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
|
||||
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
|
||||
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
|
||||
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock)                                           | ✔     | ✔      |
|
||||
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
|
||||
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
|
||||
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
|
||||
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
|
||||
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
|
||||
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
|
||||
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
|
||||
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
|
||||
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
|
||||
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
|
||||
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
|
||||
| [TSKV](#tskv) | ✔ | ✔ |
|
||||
| [Pretty](#pretty) | ✗ | ✔ |
|
||||
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
|
||||
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
|
||||
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
|
||||
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
|
||||
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
|
||||
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
|
||||
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
|
||||
| [PrettySpace](#prettyspace) | ✗ | ✔ |
|
||||
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
|
||||
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
|
||||
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
|
||||
| [Prometheus](#prometheus) | ✗ | ✔ |
|
||||
| [Protobuf](#protobuf) | ✔ | ✔ |
|
||||
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
|
||||
| [Avro](#data-format-avro) | ✔ | ✔ |
|
||||
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
|
||||
| [Parquet](#data-format-parquet) | ✔ | ✔ |
|
||||
| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ |
|
||||
| [Arrow](#data-format-arrow) | ✔ | ✔ |
|
||||
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
|
||||
| [ORC](#data-format-orc) | ✔ | ✔ |
|
||||
| [One](#data-format-one) | ✔ | ✗ |
|
||||
| [RowBinary](#rowbinary) | ✔ | ✔ |
|
||||
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
|
||||
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
|
||||
| [Native](#native) | ✔ | ✔ |
|
||||
| [Null](#null) | ✗ | ✔ |
|
||||
| [XML](#xml) | ✗ | ✔ |
|
||||
| [CapnProto](#capnproto) | ✔ | ✔ |
|
||||
| [LineAsString](#lineasstring) | ✔ | ✔ |
|
||||
| [Regexp](#data-format-regexp) | ✔ | ✗ |
|
||||
| [RawBLOB](#rawblob) | ✔ | ✔ |
|
||||
| [MsgPack](#msgpack) | ✔ | ✔ |
|
||||
| [MySQLDump](#mysqldump) | ✔ | ✗ |
|
||||
| [Markdown](#markdown) | ✗ | ✔ |
|
||||
|
||||
|
||||
You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
|
||||
@ -195,6 +196,7 @@ SELECT * FROM nestedt FORMAT TSV
|
||||
- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
|
||||
- [input_format_tsv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`.
|
||||
- [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
|
||||
- [input_format_tsv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_allow_variable_number_of_columns) - allow variable number of columns in TSV format, ignore extra columns and use default values on missing columns. Default value - `false`.
|
||||
|
||||
## TabSeparatedRaw {#tabseparatedraw}
|
||||
|
||||
@ -472,7 +474,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
|
||||
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
|
||||
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
|
||||
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow using whitespace or tab as field delimiter in CSV strings. Default value - `false`.
|
||||
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values. Default value - `false`.
|
||||
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - allow variable number of columns in CSV format, ignore extra columns and use default values on missing columns. Default value - `false`.
|
||||
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`.
|
||||
|
||||
## CSVWithNames {#csvwithnames}
|
||||
@ -501,9 +503,10 @@ the types from input data will be compared with the types of the corresponding c
|
||||
|
||||
Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) settings, not from format strings.
|
||||
|
||||
If setting [input_format_custom_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) is enabled, ClickHouse will automatically detect header with names and types if any.
|
||||
|
||||
If setting [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) is enabled, trailing empty lines at the end of file will be skipped.
|
||||
Additional settings:
|
||||
- [input_format_custom_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) - enables automatic detection of header with names and types if any. Default value - `true`.
|
||||
- [input_format_custom_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_custom_skip_trailing_empty_lines) - skip trailing empty lines at the end of file. Default value - `false`.
|
||||
- [input_format_custom_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_custom_allow_variable_number_of_columns) - allow variable number of columns in CustomSeparated format, ignore extra columns and use default values on missing columns. Default value - `false`.
|
||||
|
||||
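As a sketch of how the escaping rule and delimiter settings above combine in practice (the file name and structure are hypothetical):

```sql
-- read semicolon-delimited rows that use CSV-style escaping
SET format_custom_escaping_rule = 'CSV', format_custom_field_delimiter = ';';
SELECT * FROM file('data.custom', CustomSeparated, 'a String, b UInt32');
```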
There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).
|
||||
|
||||
@ -1261,6 +1264,7 @@ SELECT * FROM json_each_row_nested
|
||||
- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
|
||||
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in JSON object while parsing named tuple. Default value - `true`.
|
||||
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - Ignore unknown keys in json object for named tuples. Default value - `false`.
|
||||
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
|
||||
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
|
||||
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
|
||||
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
|
||||
@ -2131,9 +2135,11 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
|
||||
|
||||
- [output_format_parquet_row_group_size](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_row_group_size) - row group size in rows while data output. Default value - `1000000`.
|
||||
- [output_format_parquet_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_string_as_string) - use Parquet String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_parquet_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_import_nested) - allow inserting array of structs into [Nested](/docs/en/sql-reference/data-types/nested-data-structures/index.md) table in Parquet input format. Default value - `false`.
|
||||
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
|
||||
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
|
||||
- [input_format_parquet_local_file_min_bytes_for_seek](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_local_file_min_bytes_for_seek) - minimum number of bytes required for a local file read to seek instead of reading and discarding data in Parquet input format. Default value - `8192`.
|
||||
- [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
|
||||
- [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`.
|
||||
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.
|
||||
@ -2407,6 +2413,34 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.
|
||||
|
||||
To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/table-engines/integrations/hdfs.md).
|
||||
|
||||
## One {#data-format-one}
|
||||
|
||||
Special input format that does not read any data from the file and returns only one row with a column of type `UInt8`, name `dummy`, and value `0` (like the `system.one` table).
|
||||
It can be used with the virtual columns `_file`/`_path` to list all files without reading the actual data.
|
||||
|
||||
Example:
|
||||
|
||||
Query:
|
||||
```sql
|
||||
SELECT _file FROM file('path/to/files/data*', One);
|
||||
```
|
||||
|
||||
Result:
|
||||
```text
|
||||
┌─_file────┐
|
||||
│ data.csv │
|
||||
└──────────┘
|
||||
┌─_file──────┐
|
||||
│ data.jsonl │
|
||||
└────────────┘
|
||||
┌─_file────┐
|
||||
│ data.tsv │
|
||||
└──────────┘
|
||||
┌─_file────────┐
|
||||
│ data.parquet │
|
||||
└──────────────┘
|
||||
```
|
||||
|
||||
## LineAsString {#lineasstring}
|
||||
|
||||
In this format, every line of input data is interpreted as a single string value. This format can only be parsed for table with a single field of type [String](/docs/en/sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized), or omitted.
|
||||
|
BIN
docs/en/interfaces/images/mysql1.png
Normal file
BIN
docs/en/interfaces/images/mysql1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 232 KiB |
BIN
docs/en/interfaces/images/mysql2.png
Normal file
BIN
docs/en/interfaces/images/mysql2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 102 KiB |
BIN
docs/en/interfaces/images/mysql3.png
Normal file
BIN
docs/en/interfaces/images/mysql3.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 37 KiB |
BIN
docs/en/interfaces/images/mysql4.png
Normal file
BIN
docs/en/interfaces/images/mysql4.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 88 KiB |
BIN
docs/en/interfaces/images/mysql5.png
Normal file
BIN
docs/en/interfaces/images/mysql5.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 246 KiB |
@ -6,7 +6,34 @@ sidebar_label: MySQL Interface
|
||||
|
||||
# MySQL Interface
|
||||
|
||||
ClickHouse supports MySQL wire protocol. To enable the MySQL wire protocol, add the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d` folder:
|
||||
ClickHouse supports the MySQL wire protocol. This allows MySQL-compatible tools to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).
|
||||
|
||||
## Enabling the MySQL Interface On ClickHouse Cloud
|
||||
|
||||
1. After creating your ClickHouse Cloud Service, on the credentials screen, select the MySQL tab
|
||||
|
||||
![Credentials screen - Prompt](./images/mysql1.png)
|
||||
|
||||
2. Toggle the switch to enable the MySQL interface for this specific service. This will expose port `3306` for this service and prompt you with your MySQL connection screen, which includes your unique MySQL username. The password will be the same as the service's default user password.
|
||||
|
||||
![Credentials screen - Enabled MySQL](./images/mysql2.png)
|
||||
|
||||
Alternatively, in order to enable the MySQL interface for an existing service:
|
||||
|
||||
1. Ensure your service is in the `Running` state, then click the "View connection string" button for the service you want to enable the MySQL interface for
|
||||
|
||||
![Connection screen - Prompt MySQL](./images/mysql3.png)
|
||||
|
||||
2. Toggle the switch to enable the MySQL interface for this specific service. This will prompt you to enter the default password.
|
||||
|
||||
![Connection screen - Prompt MySQL](./images/mysql4.png)
|
||||
|
||||
3. After entering the password, you will be shown the MySQL connection string for this service
|
||||
![Connection screen - MySQL Enabled](./images/mysql5.png)
|
||||
|
||||
## Enabling the MySQL Interface On Self-managed ClickHouse
|
||||
|
||||
Add the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d/` [folder](../operations/configuration-files):
|
||||
|
||||
``` xml
|
||||
<clickhouse>
|
||||
@ -20,7 +47,7 @@ Startup your ClickHouse server and look for a log message similar to the followi
|
||||
{} <Information> Application: Listening for MySQL compatibility protocol: 127.0.0.1:9004
|
||||
```
|
||||
|
||||
## Connect mysql to ClickHouse
|
||||
## Connect MySQL to ClickHouse
|
||||
|
||||
The following command demonstrates how to connect the MySQL client `mysql` to ClickHouse:
|
||||
|
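As a sketch, assuming the default MySQL compatibility port `9004` from the log message above and the `default` user (adjust host, user and port to your setup):

```bash
mysql --protocol tcp -h 127.0.0.1 -u default -P 9004
```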
||||
|
@ -21,6 +21,11 @@ In most cases it is recommended to use an appropriate tool or library instead of
|
||||
- [ODBC driver](../interfaces/odbc.md)
|
||||
- [C++ client library](../interfaces/cpp.md)
|
||||
|
||||
ClickHouse server provides embedded visual interfaces for power users:
|
||||
|
||||
- Play UI: open `/play` in the browser;
|
||||
- Advanced Dashboard: open `/dashboard` in the browser;
|
||||
|
||||
There are also a wide range of third-party libraries for working with ClickHouse:
|
||||
|
||||
- [Client libraries](../interfaces/third-party/client-libraries.md)
|
||||
|
@ -83,8 +83,8 @@ ClickHouse, Inc. does **not** maintain the tools and libraries listed below and
|
||||
- Python
|
||||
- [SQLAlchemy](https://www.sqlalchemy.org)
|
||||
- [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
|
||||
- [pandas](https://pandas.pydata.org)
|
||||
- [pandahouse](https://github.com/kszucs/pandahouse)
|
||||
- [PyArrow/Pandas](https://pandas.pydata.org)
|
||||
- [Ibis](https://github.com/ibis-project/ibis)
|
||||
- PHP
|
||||
- [Doctrine](https://www.doctrine-project.org/)
|
||||
- [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)
|
||||
|
@ -169,7 +169,6 @@ host = '127.0.0.1',
|
||||
port = 3306,
|
||||
database = 'test',
|
||||
connection_pool_size = 8,
|
||||
on_duplicate_clause = 1,
|
||||
replace_query = 1
|
||||
```
|
||||
|
||||
@ -185,7 +184,6 @@ replace_query = 1
|
||||
<port>3306</port>
|
||||
<database>test</database>
|
||||
<connection_pool_size>8</connection_pool_size>
|
||||
<on_duplicate_clause>1</on_duplicate_clause>
|
||||
<replace_query>1</replace_query>
|
||||
</mymysql>
|
||||
</named_collections>
|
||||
|
@ -221,6 +221,10 @@ Default: 1024
|
||||
|
||||
Size of cache for index marks. Zero means disabled.
|
||||
|
||||
:::note
|
||||
This setting can be modified at runtime and will take effect immediately.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 0
|
||||
@ -230,6 +234,10 @@ Default: 0
|
||||
|
||||
Size of cache for uncompressed blocks of MergeTree indices. Zero means disabled.
|
||||
|
||||
:::note
|
||||
This setting can be modified at runtime and will take effect immediately.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 0
|
||||
@ -255,6 +263,10 @@ Default: SLRU
|
||||
|
||||
Size of cache for marks (index of MergeTree family of tables).
|
||||
|
||||
:::note
|
||||
This setting can be modified at runtime and will take effect immediately.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 5368709120
|
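A sketch of how this setting is typically declared in the server configuration (the value shown simply restates the default; the file name under `config.d/` is arbitrary):

```xml
<clickhouse>
    <!-- mark cache of 5 GiB -->
    <mark_cache_size>5368709120</mark_cache_size>
</clickhouse>
```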
||||
@ -288,7 +300,7 @@ Default: 1000
|
||||
Limit on the total number of concurrently executed queries. Zero means unlimited. Note that limits on insert and select queries, and on the maximum number of queries for users, must also be considered. See also max_concurrent_insert_queries, max_concurrent_select_queries, max_concurrent_queries_for_all_users.
|
||||
|
||||
:::note
|
||||
These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
@ -300,7 +312,7 @@ Default: 0
|
||||
Limit on the total number of concurrent insert queries. Zero means unlimited.
|
||||
|
||||
:::note
|
||||
These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
@ -312,7 +324,7 @@ Default: 0
|
||||
Limit on the total number of concurrent select queries. Zero means unlimited.
|
||||
|
||||
:::note
|
||||
These settings can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
@ -456,6 +468,10 @@ Sets the cache size (in bytes) for mapped files. This setting allows avoiding fr
|
||||
|
||||
Note that the amount of data in mapped files does not consume memory directly and is not accounted for in query or server memory usage, because this memory can be discarded similarly to the OS page cache. The cache is dropped (the files are closed) automatically on removal of old parts in tables of the MergeTree family; it can also be dropped manually with the `SYSTEM DROP MMAP CACHE` query.
|
||||
|
||||
:::note
|
||||
This setting can be modified at runtime and will take effect immediately.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 1000
|
||||
@ -605,6 +621,10 @@ There is one shared cache for the server. Memory is allocated on demand. The cac
|
||||
|
||||
The uncompressed cache is advantageous for very short queries in individual cases.
|
||||
|
||||
:::note
|
||||
This setting can be modified at runtime and will take effect immediately.
|
||||
:::
|
||||
|
||||
Type: UInt64
|
||||
|
||||
Default: 0
|
||||
@ -1640,7 +1660,7 @@ Keys for server/client settings:
|
||||
- verificationMode (default: relaxed) – The method for checking the node’s certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
|
||||
- verificationDepth (default: 9) – The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
|
||||
- loadDefaultCAFile (default: true) – Whether built-in CA certificates for OpenSSL will be used. ClickHouse assumes that builtin CA certificates are in the file `/etc/ssl/cert.pem` (resp. the directory `/etc/ssl/certs`) or in the file (resp. directory) specified by the environment variable `SSL_CERT_FILE` (resp. `SSL_CERT_DIR`).
|
||||
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH`) - Supported OpenSSL encryptions.
|
||||
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`) - Supported OpenSSL encryptions.
|
||||
- cacheSessions (default: false) – Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
|
||||
- sessionIdContext (default: `${application.name}`) – A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
|
||||
- sessionCacheSize (default: [1024\*20](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1978)) – The maximum number of sessions that the server caches. A value of 0 means unlimited sessions.
|
||||
|
@ -7,6 +7,10 @@ pagination_next: en/operations/settings/settings
|
||||
|
||||
# Settings Overview
|
||||
|
||||
:::note
|
||||
XML-based Settings Profiles and [configuration files](https://clickhouse.com/docs/en/operations/configuration-files) are currently not supported for ClickHouse Cloud. To specify settings for your ClickHouse Cloud service, you must use [SQL-driven Settings Profiles](https://clickhouse.com/docs/en/operations/access-rights#settings-profiles-management).
|
||||
:::
|
||||
|
||||
There are two main groups of ClickHouse settings:
|
||||
|
||||
- Global server settings
|
||||
|
@ -56,11 +56,11 @@ Possible values:
|
||||
|
||||
- Any positive integer.
|
||||
|
||||
Default value: 300.
|
||||
Default value: 3000.
|
||||
|
||||
To achieve maximum performance of `SELECT` queries, it is necessary to minimize the number of parts processed, see [Merge Tree](../../development/architecture.md#merge-tree).
|
||||
|
||||
You can set a larger value such as 600 (or 1200); this will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also, in case of a merge issue (for example, due to insufficient disk space) you will notice it later than you would with the original 300.
|
||||
Prior to 23.6 this setting was set to 300. You can set a higher value; this will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also, in case of a merge issue (for example, due to insufficient disk space) you will notice it later than you would with the original 300.
|
||||
|
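Since this is a MergeTree-level setting, it can also be overridden per table; a minimal sketch (table and column names are hypothetical):

```sql
-- set it when creating a table
CREATE TABLE my_table (id UInt64, value String)
ENGINE = MergeTree
ORDER BY id
SETTINGS parts_to_throw_insert = 3000;

-- or change it on an existing table
ALTER TABLE my_table MODIFY SETTING parts_to_throw_insert = 3000;
```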
||||
|
||||
## parts_to_delay_insert {#parts-to-delay-insert}
|
||||
|
@ -627,6 +627,13 @@ Column type should be String. If value is empty, default names `row_{i}`will be
|
||||
|
||||
Default value: ''.
|
||||
|
||||
### input_format_json_compact_allow_variable_number_of_columns {#input_format_json_compact_allow_variable_number_of_columns}
|
||||
|
||||
Allow variable number of columns in rows in JSONCompact/JSONCompactEachRow input formats.
|
||||
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
|
||||
|
||||
Disabled by default.
|
||||
|
||||
## TSV format settings {#tsv-format-settings}
|
||||
|
||||
### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
|
||||
@ -764,6 +771,13 @@ When enabled, trailing empty lines at the end of TSV file will be skipped.
|
||||
|
||||
Disabled by default.
|
||||
|
||||
### input_format_tsv_allow_variable_number_of_columns {#input_format_tsv_allow_variable_number_of_columns}
|
||||
|
||||
Allow variable number of columns in rows in TSV input format.
|
||||
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
|
||||
|
||||
Disabled by default.
|
||||
|
||||
## CSV format settings {#csv-format-settings}
|
||||
|
||||
### format_csv_delimiter {#format_csv_delimiter}
|
||||
@ -955,9 +969,11 @@ Result
|
||||
```text
|
||||
" string "
|
||||
```
|
||||
|
||||
### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}
|
||||
|
||||
ignore extra columns in CSV input (if file has more columns than expected) and treat missing fields in CSV input as default values.
|
||||
Allow variable number of columns in rows in CSV input format.
|
||||
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
|
||||
|
||||
Disabled by default.
|
||||
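A sketch of enabling it for a single query (the file name and structure are hypothetical):

```sql
SELECT * FROM file('rows.csv', CSV, 'a String, b Int32, c Int32')
SETTINGS input_format_csv_allow_variable_number_of_columns = 1;
```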
|
||||
@ -1223,6 +1239,12 @@ Allow skipping columns with unsupported types while schema inference for format
|
||||
|
||||
Disabled by default.
|
||||
|
||||
### input_format_parquet_local_file_min_bytes_for_seek {#input_format_parquet_local_file_min_bytes_for_seek}
|
||||
|
||||
Minimum number of bytes required for a local file read to seek instead of reading and discarding data in Parquet input format.
|
||||
|
||||
Default value - `8192`.
|
||||
|
||||
### output_format_parquet_string_as_string {#output_format_parquet_string_as_string}
|
||||
|
||||
Use Parquet String type instead of Binary for String columns.
|
||||
@ -1565,6 +1587,13 @@ When enabled, trailing empty lines at the end of file in CustomSeparated format
|
||||
|
||||
Disabled by default.
|
||||
|
||||
### input_format_custom_allow_variable_number_of_columns {#input_format_custom_allow_variable_number_of_columns}
|
||||
|
||||
Allow variable number of columns in rows in CustomSeparated input format.
|
||||
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
|
||||
|
||||
Disabled by default.
|
||||
|
||||
## Regexp format settings {#regexp-format-settings}
|
||||
|
||||
### format_regexp_escaping_rule {#format_regexp_escaping_rule}
|
||||
|
Some files were not shown because too many files have changed in this diff.