Merge branch 'master' into fast-count-from-files

Commit 88aee95122 by Kruglov Pavel, 2023-08-21 14:46:33 +02:00, committed by GitHub.
1139 changed files with 16023 additions and 11467 deletions

View File

@ -3,6 +3,9 @@ name: BackportPR
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
# Export system tables to ClickHouse Cloud
CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}
on: # yamllint disable-line rule:truthy
push:

View File

@ -3,6 +3,9 @@ name: MasterCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
# Export system tables to ClickHouse Cloud
CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}
on: # yamllint disable-line rule:truthy
push:
@ -892,6 +895,48 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
fetch-depth: 0 # otherwise we will have no info about contributors
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@ -975,6 +1020,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy

View File

@ -3,6 +3,9 @@ name: PullRequestCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
# Export system tables to ClickHouse Cloud
CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}
on: # yamllint disable-line rule:truthy
pull_request:
@ -952,6 +955,47 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinS390X:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_s390x
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
############################################################################################
##################################### Docker images #######################################
############################################################################################
@ -1034,6 +1078,7 @@ jobs:
- BuilderBinFreeBSD
- BuilderBinPPC64
- BuilderBinRISCV64
- BuilderBinS390X
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
@ -5182,3 +5227,39 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
##############################################################################################
##################################### SQL TEST ###############################################
##############################################################################################
SQLTest:
needs: [BuilderDebRelease]
runs-on: [self-hosted, fuzzer-unit-tester]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/sqltest
REPORTS_PATH=${{runner.temp}}/reports_dir
CHECK_NAME=SQLTest
REPO_COPY=${{runner.temp}}/sqltest/ClickHouse
EOF
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: SQLTest
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci"
python3 sqltest.py "$CHECK_NAME"
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"

View File

@ -3,6 +3,9 @@ name: ReleaseBranchCI
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
# Export system tables to ClickHouse Cloud
CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}
on: # yamllint disable-line rule:truthy
push:

View File

@ -52,7 +52,6 @@
* Add new setting `disable_url_encoding` that allows to disable decoding/encoding path in uri in URL engine. [#52337](https://github.com/ClickHouse/ClickHouse/pull/52337) ([Kruglov Pavel](https://github.com/Avogar)).
#### Performance Improvement
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)).
* Enable automatic selection of the sparse serialization format by default. It improves performance. The format is supported since version 22.1. After this change, downgrading to versions older than 22.1 might not be possible. You can turn off the usage of the sparse serialization format by providing the `ratio_of_defaults_for_sparse_serialization = 1` setting for your MergeTree tables. [#49631](https://github.com/ClickHouse/ClickHouse/pull/49631) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enable `move_all_conditions_to_prewhere` and `enable_multiple_prewhere_read_steps` settings by default. [#46365](https://github.com/ClickHouse/ClickHouse/pull/46365) ([Alexander Gololobov](https://github.com/davenger)).
* Improves performance of some queries by tuning allocator. [#46416](https://github.com/ClickHouse/ClickHouse/pull/46416) ([Azat Khuzhin](https://github.com/azat)).
@ -114,6 +113,7 @@
* Now interserver port will be closed only after tables are shut down. [#52498](https://github.com/ClickHouse/ClickHouse/pull/52498) ([alesapin](https://github.com/alesapin)).
#### Experimental Feature
* Writing parquet files is 10x faster, it's multi-threaded now. Almost the same speed as reading. [#49367](https://github.com/ClickHouse/ClickHouse/pull/49367) ([Michael Kolupaev](https://github.com/al13n321)). This is controlled by the setting `output_format_parquet_use_custom_encoder` which is disabled by default, because the feature is non-ideal.
* Added support for [PRQL](https://prql-lang.org/) as a query language. [#50686](https://github.com/ClickHouse/ClickHouse/pull/50686) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Allow to add disk name for custom disks. Previously custom disks would use an internal generated disk name. Now it will be possible with `disk = disk_<name>(...)` (e.g. disk will have name `name`). [#51552](https://github.com/ClickHouse/ClickHouse/pull/51552) ([Kseniia Sumarokova](https://github.com/kssenii)). This syntax can be changed in this release.
* (experimental MaterializedMySQL) Fixed crash when `mysqlxx::Pool::Entry` is used after it was disconnected. [#52063](https://github.com/ClickHouse/ClickHouse/pull/52063) ([Val Doroshchuk](https://github.com/valbok)).
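A brief illustration of the experimental Parquet writer mentioned a few entries above: the `output_format_parquet_use_custom_encoder` setting comes from the changelog itself, while the file path, row count, and invocation below are hypothetical placeholders, not part of this commit.

```python
import subprocess

# Illustrative only: enable the experimental multi-threaded Parquet encoder
# for a single INSERT issued through clickhouse-client.
query = (
    "INSERT INTO FUNCTION file('/tmp/example.parquet', 'Parquet') "
    "SELECT number AS id, toString(number) AS value FROM numbers(1000000) "
    "SETTINGS output_format_parquet_use_custom_encoder = 1"
)
subprocess.run(["clickhouse-client", "--query", query], check=True)
```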

View File

@ -208,9 +208,6 @@ option(OMIT_HEAVY_DEBUG_SYMBOLS
"Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)" "Do not generate debugger info for heavy modules (ClickHouse functions and dictionaries, some contrib)"
${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT}) ${OMIT_HEAVY_DEBUG_SYMBOLS_DEFAULT})
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
set(USE_DEBUG_HELPERS ON)
endif()
option(USE_DEBUG_HELPERS "Enable debug helpers" ${USE_DEBUG_HELPERS})
option(BUILD_STANDALONE_KEEPER "Build keeper as small standalone binary" OFF)

View File

@ -3,6 +3,7 @@
#include <magic_enum.hpp>
#include <fmt/format.h>
template <class T> concept is_enum = std::is_enum_v<T>;
namespace detail

View File

@ -7,8 +7,6 @@
#include <base/find_symbols.h>
#include <base/preciseExp10.h>
#include <iostream>
#define JSON_MAX_DEPTH 100

View File

@ -12,7 +12,6 @@
#include <tuple>
#include <limits>
#include <boost/multiprecision/cpp_bin_float.hpp>
#include <boost/math/special_functions/fpclassify.hpp>
// NOLINTBEGIN(*)
@ -22,6 +21,7 @@
#define CONSTEXPR_FROM_DOUBLE constexpr
using FromDoubleIntermediateType = long double;
#else
#include <boost/multiprecision/cpp_bin_float.hpp>
/// `wide_integer_from_builtin` can't be constexpr with non-literal `cpp_bin_float_double_extended`
#define CONSTEXPR_FROM_DOUBLE
using FromDoubleIntermediateType = boost::multiprecision::cpp_bin_float_double_extended;

View File

@ -19,7 +19,6 @@
#include "Poco/UTF16Encoding.h" #include "Poco/UTF16Encoding.h"
#include "Poco/Buffer.h" #include "Poco/Buffer.h"
#include "Poco/Exception.h" #include "Poco/Exception.h"
#include <iostream>
using Poco::Buffer; using Poco::Buffer;

View File

@ -97,7 +97,7 @@ namespace Data
///
/// static void extract(std::size_t pos, Person& obj, const Person& defVal, AbstractExtractor::Ptr pExt)
/// {
/// // defVal is the default person we should use if we encounter NULL entries, so we take the individual fields
/// // as defaults. You can do more complex checking, ie return defVal if only one single entry of the fields is null etc...
/// poco_assert_dbg (!pExt.isNull());
/// std::string lastName;

View File

@ -16,7 +16,6 @@
#include "Poco/TaskManager.h" #include "Poco/TaskManager.h"
#include "Poco/Exception.h" #include "Poco/Exception.h"
#include <iostream>
#include <array> #include <array>

View File

@ -14,7 +14,6 @@
#include "Poco/JSON/Object.h" #include "Poco/JSON/Object.h"
#include <iostream> #include <iostream>
#include <sstream>
using Poco::Dynamic::Var; using Poco::Dynamic::Var;

View File

@ -26,7 +26,6 @@
#include "Poco/CountingStream.h" #include "Poco/CountingStream.h"
#include "Poco/RegularExpression.h" #include "Poco/RegularExpression.h"
#include <sstream> #include <sstream>
#include <iostream>
using Poco::NumberFormatter; using Poco::NumberFormatter;

View File

@ -146,7 +146,7 @@ namespace Net
std::string cipherList;
/// Specifies the supported ciphers in OpenSSL notation.
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH".
/// Defaults to "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH".
std::string dhParamsFile;
/// Specifies a file containing Diffie-Hellman parameters.
@ -172,7 +172,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.
@ -200,7 +200,7 @@ namespace Net
VerificationMode verificationMode = VERIFY_RELAXED,
int verificationDepth = 9,
bool loadDefaultCAs = false,
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
const std::string & cipherList = "ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH");
/// Creates a Context.
///
/// * usage specifies whether the context is used by a client or server.

View File

@ -76,7 +76,7 @@ namespace Net
/// <verificationMode>none|relaxed|strict|once</verificationMode>
/// <verificationDepth>1..9</verificationDepth>
/// <loadDefaultCAFile>true|false</loadDefaultCAFile>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH</cipherList>
/// <cipherList>ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH</cipherList>
/// <preferServerCiphers>true|false</preferServerCiphers>
/// <privateKeyPassphraseHandler>
/// <name>KeyFileHandler</name>

View File

@ -41,7 +41,7 @@ Context::Params::Params():
verificationMode(VERIFY_RELAXED),
verificationDepth(9),
loadDefaultCAs(false),
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH")
cipherList("ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH")
{
}

View File

@ -20,6 +20,9 @@ set (CMAKE_SYSROOT "${TOOLCHAIN_PATH}/s390x-linux-gnu/libc")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=mold -Wl,-L${CMAKE_SYSROOT}/usr/lib64")
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)

View File

@ -47,7 +47,7 @@ if (CMAKE_CROSSCOMPILING)
set (ENABLE_RUST OFF CACHE INTERNAL "")
elseif (ARCH_S390X)
set (ENABLE_GRPC OFF CACHE INTERNAL "")
set (ENABLE_SENTRY OFF CACHE INTERNAL "")
set (ENABLE_RUST OFF CACHE INTERNAL "")
endif ()
elseif (OS_FREEBSD)
# FIXME: broken dependencies

contrib/base64 vendored
@ -1 +1 @@
Subproject commit 9499e0c4945589973b9ea1bc927377cfbc84aa46
Subproject commit 8628e258090f9eb76d90ac3c91e1ab4690e9aa11

contrib/boost vendored
@ -1 +1 @@
Subproject commit aec12eea7fc762721ae16943d1361340c66c9c17
Subproject commit bb179652862b528d94a9032a784796c4db846c3f

View File

@ -172,9 +172,9 @@ endif()
# coroutine
set (SRCS_COROUTINE
"${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp"
"${LIBRARY_DIR}/libs/coroutine/exceptions.cpp"
"${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/detail/coroutine_context.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/exceptions.cpp"
"${LIBRARY_DIR}/libs/coroutine/src/posix/stack_traits.cpp"
)
add_library (_boost_coroutine ${SRCS_COROUTINE})
add_library (boost::coroutine ALIAS _boost_coroutine)

View File

@ -73,8 +73,8 @@ struct uint128
uint128() = default;
uint128(uint64 low64_, uint64 high64_) : low64(low64_), high64(high64_) {}
friend bool operator ==(const uint128 & x, const uint128 & y) { return (x.low64 == y.low64) && (x.high64 == y.high64); }
friend bool operator !=(const uint128 & x, const uint128 & y) { return !(x == y); }
friend auto operator<=>(const uint128 &, const uint128 &) = default;
};
inline uint64 Uint128Low64(const uint128 & x) { return x.low64; }

contrib/curl vendored
@ -1 +1 @@
Subproject commit b0edf0b7dae44d9e66f270a257cf654b35d5263d
Subproject commit eb3b049df526bf125eda23218e680ce7fa9ec46c

View File

@ -8,125 +8,122 @@ endif()
set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/curl")
set (SRCS
"${LIBRARY_DIR}/lib/fopen.c" "${LIBRARY_DIR}/lib/altsvc.c"
"${LIBRARY_DIR}/lib/noproxy.c" "${LIBRARY_DIR}/lib/amigaos.c"
"${LIBRARY_DIR}/lib/idn.c" "${LIBRARY_DIR}/lib/asyn-thread.c"
"${LIBRARY_DIR}/lib/cfilters.c" "${LIBRARY_DIR}/lib/base64.c"
"${LIBRARY_DIR}/lib/cf-socket.c" "${LIBRARY_DIR}/lib/bufq.c"
"${LIBRARY_DIR}/lib/bufref.c"
"${LIBRARY_DIR}/lib/cf-h1-proxy.c"
"${LIBRARY_DIR}/lib/cf-haproxy.c" "${LIBRARY_DIR}/lib/cf-haproxy.c"
"${LIBRARY_DIR}/lib/cf-https-connect.c" "${LIBRARY_DIR}/lib/cf-https-connect.c"
"${LIBRARY_DIR}/lib/file.c" "${LIBRARY_DIR}/lib/cf-socket.c"
"${LIBRARY_DIR}/lib/timeval.c" "${LIBRARY_DIR}/lib/cfilters.c"
"${LIBRARY_DIR}/lib/base64.c" "${LIBRARY_DIR}/lib/conncache.c"
"${LIBRARY_DIR}/lib/hostip.c"
"${LIBRARY_DIR}/lib/progress.c"
"${LIBRARY_DIR}/lib/formdata.c"
"${LIBRARY_DIR}/lib/cookie.c"
"${LIBRARY_DIR}/lib/http.c"
"${LIBRARY_DIR}/lib/sendf.c"
"${LIBRARY_DIR}/lib/url.c"
"${LIBRARY_DIR}/lib/dict.c"
"${LIBRARY_DIR}/lib/if2ip.c"
"${LIBRARY_DIR}/lib/speedcheck.c"
"${LIBRARY_DIR}/lib/ldap.c"
"${LIBRARY_DIR}/lib/version.c"
"${LIBRARY_DIR}/lib/getenv.c"
"${LIBRARY_DIR}/lib/escape.c"
"${LIBRARY_DIR}/lib/mprintf.c"
"${LIBRARY_DIR}/lib/telnet.c"
"${LIBRARY_DIR}/lib/netrc.c"
"${LIBRARY_DIR}/lib/getinfo.c"
"${LIBRARY_DIR}/lib/transfer.c"
"${LIBRARY_DIR}/lib/strcase.c"
"${LIBRARY_DIR}/lib/easy.c"
"${LIBRARY_DIR}/lib/curl_fnmatch.c"
"${LIBRARY_DIR}/lib/curl_log.c"
"${LIBRARY_DIR}/lib/fileinfo.c"
"${LIBRARY_DIR}/lib/krb5.c"
"${LIBRARY_DIR}/lib/memdebug.c"
"${LIBRARY_DIR}/lib/http_chunks.c"
"${LIBRARY_DIR}/lib/strtok.c"
"${LIBRARY_DIR}/lib/connect.c" "${LIBRARY_DIR}/lib/connect.c"
"${LIBRARY_DIR}/lib/llist.c"
"${LIBRARY_DIR}/lib/hash.c"
"${LIBRARY_DIR}/lib/multi.c"
"${LIBRARY_DIR}/lib/content_encoding.c" "${LIBRARY_DIR}/lib/content_encoding.c"
"${LIBRARY_DIR}/lib/share.c" "${LIBRARY_DIR}/lib/cookie.c"
"${LIBRARY_DIR}/lib/http_digest.c" "${LIBRARY_DIR}/lib/curl_addrinfo.c"
"${LIBRARY_DIR}/lib/md4.c" "${LIBRARY_DIR}/lib/curl_des.c"
"${LIBRARY_DIR}/lib/md5.c" "${LIBRARY_DIR}/lib/curl_endian.c"
"${LIBRARY_DIR}/lib/http_negotiate.c" "${LIBRARY_DIR}/lib/curl_fnmatch.c"
"${LIBRARY_DIR}/lib/inet_pton.c" "${LIBRARY_DIR}/lib/curl_get_line.c"
"${LIBRARY_DIR}/lib/strtoofft.c" "${LIBRARY_DIR}/lib/curl_gethostname.c"
"${LIBRARY_DIR}/lib/strerror.c" "${LIBRARY_DIR}/lib/curl_gssapi.c"
"${LIBRARY_DIR}/lib/amigaos.c" "${LIBRARY_DIR}/lib/curl_memrchr.c"
"${LIBRARY_DIR}/lib/curl_multibyte.c"
"${LIBRARY_DIR}/lib/curl_ntlm_core.c"
"${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
"${LIBRARY_DIR}/lib/curl_path.c"
"${LIBRARY_DIR}/lib/curl_range.c"
"${LIBRARY_DIR}/lib/curl_rtmp.c"
"${LIBRARY_DIR}/lib/curl_sasl.c"
"${LIBRARY_DIR}/lib/curl_sspi.c"
"${LIBRARY_DIR}/lib/curl_threads.c"
"${LIBRARY_DIR}/lib/curl_trc.c"
"${LIBRARY_DIR}/lib/dict.c"
"${LIBRARY_DIR}/lib/doh.c"
"${LIBRARY_DIR}/lib/dynbuf.c"
"${LIBRARY_DIR}/lib/dynhds.c"
"${LIBRARY_DIR}/lib/easy.c"
"${LIBRARY_DIR}/lib/escape.c"
"${LIBRARY_DIR}/lib/file.c"
"${LIBRARY_DIR}/lib/fileinfo.c"
"${LIBRARY_DIR}/lib/fopen.c"
"${LIBRARY_DIR}/lib/formdata.c"
"${LIBRARY_DIR}/lib/getenv.c"
"${LIBRARY_DIR}/lib/getinfo.c"
"${LIBRARY_DIR}/lib/gopher.c"
"${LIBRARY_DIR}/lib/hash.c"
"${LIBRARY_DIR}/lib/headers.c"
"${LIBRARY_DIR}/lib/hmac.c"
"${LIBRARY_DIR}/lib/hostasyn.c" "${LIBRARY_DIR}/lib/hostasyn.c"
"${LIBRARY_DIR}/lib/hostip.c"
"${LIBRARY_DIR}/lib/hostip4.c" "${LIBRARY_DIR}/lib/hostip4.c"
"${LIBRARY_DIR}/lib/hostip6.c" "${LIBRARY_DIR}/lib/hostip6.c"
"${LIBRARY_DIR}/lib/hostsyn.c" "${LIBRARY_DIR}/lib/hostsyn.c"
"${LIBRARY_DIR}/lib/hsts.c"
"${LIBRARY_DIR}/lib/http.c"
"${LIBRARY_DIR}/lib/http2.c"
"${LIBRARY_DIR}/lib/http_aws_sigv4.c"
"${LIBRARY_DIR}/lib/http_chunks.c"
"${LIBRARY_DIR}/lib/http_digest.c"
"${LIBRARY_DIR}/lib/http_negotiate.c"
"${LIBRARY_DIR}/lib/http_ntlm.c"
"${LIBRARY_DIR}/lib/http_proxy.c"
"${LIBRARY_DIR}/lib/idn.c"
"${LIBRARY_DIR}/lib/if2ip.c"
"${LIBRARY_DIR}/lib/imap.c"
"${LIBRARY_DIR}/lib/inet_ntop.c" "${LIBRARY_DIR}/lib/inet_ntop.c"
"${LIBRARY_DIR}/lib/inet_pton.c"
"${LIBRARY_DIR}/lib/krb5.c"
"${LIBRARY_DIR}/lib/ldap.c"
"${LIBRARY_DIR}/lib/llist.c"
"${LIBRARY_DIR}/lib/md4.c"
"${LIBRARY_DIR}/lib/md5.c"
"${LIBRARY_DIR}/lib/memdebug.c"
"${LIBRARY_DIR}/lib/mime.c"
"${LIBRARY_DIR}/lib/mprintf.c"
"${LIBRARY_DIR}/lib/mqtt.c"
"${LIBRARY_DIR}/lib/multi.c"
"${LIBRARY_DIR}/lib/netrc.c"
"${LIBRARY_DIR}/lib/nonblock.c"
"${LIBRARY_DIR}/lib/noproxy.c"
"${LIBRARY_DIR}/lib/openldap.c"
"${LIBRARY_DIR}/lib/parsedate.c" "${LIBRARY_DIR}/lib/parsedate.c"
"${LIBRARY_DIR}/lib/pingpong.c"
"${LIBRARY_DIR}/lib/pop3.c"
"${LIBRARY_DIR}/lib/progress.c"
"${LIBRARY_DIR}/lib/psl.c"
"${LIBRARY_DIR}/lib/rand.c"
"${LIBRARY_DIR}/lib/rename.c"
"${LIBRARY_DIR}/lib/rtsp.c"
"${LIBRARY_DIR}/lib/select.c" "${LIBRARY_DIR}/lib/select.c"
"${LIBRARY_DIR}/lib/splay.c" "${LIBRARY_DIR}/lib/sendf.c"
"${LIBRARY_DIR}/lib/strdup.c" "${LIBRARY_DIR}/lib/setopt.c"
"${LIBRARY_DIR}/lib/sha256.c"
"${LIBRARY_DIR}/lib/share.c"
"${LIBRARY_DIR}/lib/slist.c"
"${LIBRARY_DIR}/lib/smb.c"
"${LIBRARY_DIR}/lib/smtp.c"
"${LIBRARY_DIR}/lib/socketpair.c"
"${LIBRARY_DIR}/lib/socks.c" "${LIBRARY_DIR}/lib/socks.c"
"${LIBRARY_DIR}/lib/curl_addrinfo.c"
"${LIBRARY_DIR}/lib/socks_gssapi.c" "${LIBRARY_DIR}/lib/socks_gssapi.c"
"${LIBRARY_DIR}/lib/socks_sspi.c" "${LIBRARY_DIR}/lib/socks_sspi.c"
"${LIBRARY_DIR}/lib/curl_sspi.c" "${LIBRARY_DIR}/lib/speedcheck.c"
"${LIBRARY_DIR}/lib/slist.c" "${LIBRARY_DIR}/lib/splay.c"
"${LIBRARY_DIR}/lib/nonblock.c" "${LIBRARY_DIR}/lib/strcase.c"
"${LIBRARY_DIR}/lib/curl_memrchr.c" "${LIBRARY_DIR}/lib/strdup.c"
"${LIBRARY_DIR}/lib/imap.c" "${LIBRARY_DIR}/lib/strerror.c"
"${LIBRARY_DIR}/lib/pop3.c" "${LIBRARY_DIR}/lib/strtok.c"
"${LIBRARY_DIR}/lib/smtp.c" "${LIBRARY_DIR}/lib/strtoofft.c"
"${LIBRARY_DIR}/lib/pingpong.c"
"${LIBRARY_DIR}/lib/rtsp.c"
"${LIBRARY_DIR}/lib/curl_threads.c"
"${LIBRARY_DIR}/lib/warnless.c"
"${LIBRARY_DIR}/lib/hmac.c"
"${LIBRARY_DIR}/lib/curl_rtmp.c"
"${LIBRARY_DIR}/lib/openldap.c"
"${LIBRARY_DIR}/lib/curl_gethostname.c"
"${LIBRARY_DIR}/lib/gopher.c"
"${LIBRARY_DIR}/lib/http_proxy.c"
"${LIBRARY_DIR}/lib/asyn-thread.c"
"${LIBRARY_DIR}/lib/curl_gssapi.c"
"${LIBRARY_DIR}/lib/http_ntlm.c"
"${LIBRARY_DIR}/lib/curl_ntlm_wb.c"
"${LIBRARY_DIR}/lib/curl_ntlm_core.c"
"${LIBRARY_DIR}/lib/curl_sasl.c"
"${LIBRARY_DIR}/lib/rand.c"
"${LIBRARY_DIR}/lib/curl_multibyte.c"
"${LIBRARY_DIR}/lib/conncache.c"
"${LIBRARY_DIR}/lib/cf-h1-proxy.c"
"${LIBRARY_DIR}/lib/http2.c"
"${LIBRARY_DIR}/lib/smb.c"
"${LIBRARY_DIR}/lib/curl_endian.c"
"${LIBRARY_DIR}/lib/curl_des.c"
"${LIBRARY_DIR}/lib/system_win32.c" "${LIBRARY_DIR}/lib/system_win32.c"
"${LIBRARY_DIR}/lib/mime.c" "${LIBRARY_DIR}/lib/telnet.c"
"${LIBRARY_DIR}/lib/sha256.c"
"${LIBRARY_DIR}/lib/setopt.c"
"${LIBRARY_DIR}/lib/curl_path.c"
"${LIBRARY_DIR}/lib/curl_range.c"
"${LIBRARY_DIR}/lib/psl.c"
"${LIBRARY_DIR}/lib/doh.c"
"${LIBRARY_DIR}/lib/urlapi.c"
"${LIBRARY_DIR}/lib/curl_get_line.c"
"${LIBRARY_DIR}/lib/altsvc.c"
"${LIBRARY_DIR}/lib/socketpair.c"
"${LIBRARY_DIR}/lib/bufref.c"
"${LIBRARY_DIR}/lib/bufq.c"
"${LIBRARY_DIR}/lib/dynbuf.c"
"${LIBRARY_DIR}/lib/dynhds.c"
"${LIBRARY_DIR}/lib/hsts.c"
"${LIBRARY_DIR}/lib/http_aws_sigv4.c"
"${LIBRARY_DIR}/lib/mqtt.c"
"${LIBRARY_DIR}/lib/rename.c"
"${LIBRARY_DIR}/lib/headers.c"
"${LIBRARY_DIR}/lib/timediff.c" "${LIBRARY_DIR}/lib/timediff.c"
"${LIBRARY_DIR}/lib/vauth/vauth.c" "${LIBRARY_DIR}/lib/timeval.c"
"${LIBRARY_DIR}/lib/transfer.c"
"${LIBRARY_DIR}/lib/url.c"
"${LIBRARY_DIR}/lib/urlapi.c"
"${LIBRARY_DIR}/lib/vauth/cleartext.c" "${LIBRARY_DIR}/lib/vauth/cleartext.c"
"${LIBRARY_DIR}/lib/vauth/cram.c" "${LIBRARY_DIR}/lib/vauth/cram.c"
"${LIBRARY_DIR}/lib/vauth/digest.c" "${LIBRARY_DIR}/lib/vauth/digest.c"
@ -138,23 +135,24 @@ set (SRCS
"${LIBRARY_DIR}/lib/vauth/oauth2.c" "${LIBRARY_DIR}/lib/vauth/oauth2.c"
"${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c" "${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c"
"${LIBRARY_DIR}/lib/vauth/spnego_sspi.c" "${LIBRARY_DIR}/lib/vauth/spnego_sspi.c"
"${LIBRARY_DIR}/lib/vauth/vauth.c"
"${LIBRARY_DIR}/lib/version.c"
"${LIBRARY_DIR}/lib/vquic/vquic.c" "${LIBRARY_DIR}/lib/vquic/vquic.c"
"${LIBRARY_DIR}/lib/vtls/openssl.c" "${LIBRARY_DIR}/lib/vssh/libssh.c"
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
"${LIBRARY_DIR}/lib/vtls/gtls.c" "${LIBRARY_DIR}/lib/vtls/gtls.c"
"${LIBRARY_DIR}/lib/vtls/vtls.c" "${LIBRARY_DIR}/lib/vtls/hostcheck.c"
"${LIBRARY_DIR}/lib/vtls/nss.c" "${LIBRARY_DIR}/lib/vtls/keylog.c"
"${LIBRARY_DIR}/lib/vtls/wolfssl.c" "${LIBRARY_DIR}/lib/vtls/mbedtls.c"
"${LIBRARY_DIR}/lib/vtls/openssl.c"
"${LIBRARY_DIR}/lib/vtls/schannel.c" "${LIBRARY_DIR}/lib/vtls/schannel.c"
"${LIBRARY_DIR}/lib/vtls/schannel_verify.c" "${LIBRARY_DIR}/lib/vtls/schannel_verify.c"
"${LIBRARY_DIR}/lib/vtls/sectransp.c" "${LIBRARY_DIR}/lib/vtls/sectransp.c"
"${LIBRARY_DIR}/lib/vtls/gskit.c" "${LIBRARY_DIR}/lib/vtls/vtls.c"
"${LIBRARY_DIR}/lib/vtls/mbedtls.c" "${LIBRARY_DIR}/lib/vtls/wolfssl.c"
"${LIBRARY_DIR}/lib/vtls/bearssl.c"
"${LIBRARY_DIR}/lib/vtls/keylog.c"
"${LIBRARY_DIR}/lib/vtls/x509asn1.c" "${LIBRARY_DIR}/lib/vtls/x509asn1.c"
"${LIBRARY_DIR}/lib/vtls/hostcheck.c" "${LIBRARY_DIR}/lib/warnless.c"
"${LIBRARY_DIR}/lib/vssh/libssh2.c"
"${LIBRARY_DIR}/lib/vssh/libssh.c"
)
add_library (_curl ${SRCS})

contrib/krb5 vendored
@ -1 +1 @@
Subproject commit b56ce6ba690e1f320df1a64afa34980c3e462617
Subproject commit 1d5c970e9369f444caf81d1d06a231a6bad8581f

View File

@ -147,7 +147,7 @@ target_compile_definitions(_libarchive PUBLIC
target_compile_options(_libarchive PRIVATE "-Wno-reserved-macro-identifier")
if (TARGET ch_contrib::xz)
target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1)
target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1 HAVE_LIBLZMA=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::xz)
endif()
@ -156,6 +156,16 @@ if (TARGET ch_contrib::zlib)
target_link_libraries(_libarchive PRIVATE ch_contrib::zlib)
endif()
if (TARGET ch_contrib::zstd)
target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()
if (TARGET ch_contrib::bzip2)
target_compile_definitions(_libarchive PUBLIC HAVE_BZLIB_H=1)
target_link_libraries(_libarchive PRIVATE ch_contrib::bzip2)
endif()
if (OS_LINUX)
target_compile_definitions(
_libarchive PUBLIC

@ -1 +1 @@
Subproject commit d857c707fccd50423bea1c4710dc469cf89607a9
Subproject commit 4ef26de16c229429141e424375142c9b03234b66

View File

@ -1,18 +1,16 @@
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
if (APPLE OR SANITIZE STREQUAL "undefined")
set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()
endif()
# TODO: Enable compilation on AArch64
set (LLVM_VERSION "15.0.0bundled")
set (LLVM_INCLUDE_DIRS
"${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm/include"
@ -58,18 +56,30 @@ set (REQUIRED_LLVM_LIBRARIES
LLVMDemangle
)
# if (ARCH_AMD64)
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
# elseif (ARCH_AARCH64)
#     list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
# endif ()
if (ARCH_AMD64)
set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
elseif (ARCH_AARCH64)
set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
elseif (ARCH_PPC64LE)
set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
elseif (ARCH_S390X)
set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
elseif (ARCH_RISCV64)
set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
endif ()
message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}")
set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "") # Skip internal compiler selection
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") # With exception handling
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "") # for x86 + ARM: "X86;AArch64"
# Omit unnecessary stuff (just the options which are ON by default)
set(LLVM_ENABLE_BACKTRACES 0 CACHE INTERNAL "")
@ -99,15 +109,12 @@ set(LLVM_ENABLE_BINDINGS 0 CACHE INTERNAL "")
set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm")
set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
# Since we always use toolchain files to generate hermatic builds, cmake will
# think it's a cross compilation, and LLVM will try to configure NATIVE LLVM
# targets with all tests enabled, which will slow down cmake configuration and
# compilation (You'll see Building native llvm-tblgen...). Let's disable the
# cross compiling indicator for now.
#
# TODO We should let cmake know whether it's indeed a cross compilation in the
# first place.
set (CMAKE_CROSSCOMPILING 0)
message (STATUS "LLVM CMAKE CROSS COMPILING ${CMAKE_CROSSCOMPILING}")
if (CMAKE_CROSSCOMPILING)
set (LLVM_HOST_TRIPLE "${CMAKE_C_COMPILER_TARGET}" CACHE INTERNAL "")
message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()
add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")
set_directory_properties (PROPERTIES
View File

@ -1,5 +1,5 @@
## ClickHouse Dockerfiles
This directory contain Dockerfiles for `clickhouse-client` and `clickhouse-server`. They are updated in each release.
This directory contain Dockerfiles for `clickhouse-server`. They are updated in each release.
Also there is bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.
Also, there is a bunch of images for testing and CI. They are listed in `images.json` file and updated on each commit to master. If you need to add another image, place information about it into `images.json`.

View File

@ -1,34 +0,0 @@
FROM ubuntu:18.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ARG repository="deb https://repo.clickhouse.com/deb/stable/ main/"
ARG version=22.1.1.*
RUN apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
ca-certificates \
dirmngr \
gnupg \
&& mkdir -p /etc/apt/sources.list.d \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
&& echo $repository > /etc/apt/sources.list.d/clickhouse.list \
&& apt-get update \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --allow-unauthenticated --yes --no-install-recommends \
clickhouse-client=$version \
clickhouse-common-static=$version \
locales \
tzdata \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf \
&& apt-get clean
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENTRYPOINT ["/usr/bin/clickhouse-client"]

View File

@ -1,7 +0,0 @@
# ClickHouse Client Docker Image
For more information see [ClickHouse Server Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/).
## License
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.

View File

@ -125,6 +125,7 @@
"docker/test/keeper-jepsen", "docker/test/keeper-jepsen",
"docker/test/server-jepsen", "docker/test/server-jepsen",
"docker/test/sqllogic", "docker/test/sqllogic",
"docker/test/sqltest",
"docker/test/stateless" "docker/test/stateless"
] ]
}, },
@ -155,13 +156,16 @@
},
"docker/docs/builder": {
"name": "clickhouse/docs-builder",
"dependent": [
]
"dependent": []
},
"docker/test/sqllogic": {
"name": "clickhouse/sqllogic-test",
"dependent": []
},
"docker/test/sqltest": {
"name": "clickhouse/sqltest",
"dependent": []
},
"docker/test/integration/nginx_dav": { "docker/test/integration/nginx_dav": {
"name": "clickhouse/nginx-dav", "name": "clickhouse/nginx-dav",
"dependent": [] "dependent": []

View File

@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
esac
ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -58,33 +58,6 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
rustup target add aarch64-apple-darwin && \ rustup target add aarch64-apple-darwin && \
rustup target add powerpc64le-unknown-linux-gnu rustup target add powerpc64le-unknown-linux-gnu
# Create vendor cache for cargo.
#
# Note, that the config.toml for the root is used, you will not be able to
# install any other crates, except those which had been vendored (since if
# there is "replace-with" for some source, then cargo will not look to other
# remotes except this).
#
# Notes for the command itself:
# - --chown is required to preserve the rights
# - unstable-options for -C
# - chmod is required to fix the permissions, since builds are running from a different user
# - copy of the Cargo.lock is required for proper dependencies versions
# - cargo vendor --sync is requried to overcome [1] bug.
#
# [1]: https://github.com/rust-lang/wg-cargo-std-aware/issues/23
COPY --chown=root:root /rust /rust/packages
RUN cargo -Z unstable-options -C /rust/packages vendor > $CARGO_HOME/config.toml && \
cp "$(rustc --print=sysroot)"/lib/rustlib/src/rust/Cargo.lock "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/ && \
cargo -Z unstable-options -C /rust/packages vendor --sync "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.toml && \
rm "$(rustc --print=sysroot)"/lib/rustlib/src/rust/library/test/Cargo.lock && \
sed -i "s#\"vendor\"#\"/rust/vendor\"#" $CARGO_HOME/config.toml && \
cat $CARGO_HOME/config.toml && \
mv /rust/packages/vendor /rust/vendor && \
chmod -R o=r+X /rust/vendor && \
ls -R -l /rust/packages && \
rm -r /rust/packages
# NOTE: Seems like gcc-11 is too new for ubuntu20 repository
# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
@ -107,6 +80,14 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
# Download toolchain and SDK for Darwin
RUN curl -sL -O https://github.com/phracker/MacOSX-SDKs/releases/download/11.3/MacOSX11.0.sdk.tar.xz
# Download and install mold 2.0 for s390x build
RUN curl -Lo /tmp/mold.tar.gz "https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz" \
&& mkdir /tmp/mold \
&& tar -xzf /tmp/mold.tar.gz -C /tmp/mold \
&& cp -r /tmp/mold/mold*/* /usr \
&& rm -rf /tmp/mold \
&& rm /tmp/mold.tar.gz
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.20.0

View File

@ -1 +0,0 @@
../../../rust

View File

@ -22,7 +22,7 @@ def check_image_exists_locally(image_name: str) -> bool:
output = subprocess.check_output(
f"docker images -q {image_name} 2> /dev/null", shell=True
)
return output != ""
return output != b""
except subprocess.CalledProcessError:
return False
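The one-character change above (`""` to `b""`) matters because `subprocess.check_output` returns `bytes`, and in Python 3 a `bytes` value never compares equal to a `str`, so the old check reported every image as locally present. A minimal standalone illustration (not from the repository):

```python
# The output of `docker images -q` for a missing image is empty, and
# subprocess.check_output returns bytes, not str:
out = b""
print(out != "")    # True  - bytes and str never compare equal, so the old check always "passed"
print(out != b"")   # False - the corrected comparison actually detects the empty output
```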
@ -46,7 +46,7 @@ def build_image(image_name: str, filepath: Path) -> None:
)
def pre_build(repo_path: Path, env_variables: List[str]):
def pre_build(repo_path: Path, env_variables: List[str]) -> None:
if "WITH_PERFORMANCE=1" in env_variables:
current_branch = subprocess.check_output(
"git branch --show-current", shell=True, encoding="utf-8"
@ -80,9 +80,12 @@ def run_docker_image_with_env(
output_dir: Path,
env_variables: List[str],
ch_root: Path,
cargo_cache_dir: Path,
ccache_dir: Optional[Path],
):
) -> None:
output_dir.mkdir(parents=True, exist_ok=True)
cargo_cache_dir.mkdir(parents=True, exist_ok=True)
env_part = " -e ".join(env_variables)
if env_part:
env_part = " -e " + env_part
@ -104,7 +107,7 @@ def run_docker_image_with_env(
cmd = (
f"docker run --network=host --user={user} --rm {ccache_mount}"
f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} "
f"{interactive} {image_name}"
f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name}"
)
logging.info("Will build ClickHouse pkg with cmd: '%s'", cmd)
@ -129,9 +132,10 @@ def parse_env_variables(
version: str,
official: bool,
additional_pkgs: bool,
with_profiler: bool,
with_coverage: bool,
with_binaries: str,
):
) -> List[str]:
DARWIN_SUFFIX = "-darwin"
DARWIN_ARM_SUFFIX = "-darwin-aarch64"
ARM_SUFFIX = "-aarch64"
@ -139,6 +143,7 @@ def parse_env_variables(
FREEBSD_SUFFIX = "-freebsd"
PPC_SUFFIX = "-ppc64le"
RISCV_SUFFIX = "-riscv64"
S390X_SUFFIX = "-s390x"
AMD64_COMPAT_SUFFIX = "-amd64-compat"
result = []
@ -152,6 +157,7 @@ def parse_env_variables(
is_cross_arm_v80compat = compiler.endswith(ARM_V80COMPAT_SUFFIX)
is_cross_ppc = compiler.endswith(PPC_SUFFIX)
is_cross_riscv = compiler.endswith(RISCV_SUFFIX)
is_cross_s390x = compiler.endswith(S390X_SUFFIX)
is_cross_freebsd = compiler.endswith(FREEBSD_SUFFIX)
is_amd64_compat = compiler.endswith(AMD64_COMPAT_SUFFIX)
@ -213,6 +219,11 @@ def parse_env_variables(
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-riscv64.cmake"
)
elif is_cross_s390x:
cc = compiler[: -len(S390X_SUFFIX)]
cmake_flags.append(
"-DCMAKE_TOOLCHAIN_FILE=/build/cmake/linux/toolchain-s390x.cmake"
)
elif is_amd64_compat:
cc = compiler[: -len(AMD64_COMPAT_SUFFIX)]
result.append("DEB_ARCH=amd64")
@ -322,6 +333,9 @@ def parse_env_variables(
# utils are not included into clickhouse-bundle, so build everything
build_target = "all"
if with_profiler:
cmake_flags.append("-DENABLE_BUILD_PROFILING=1")
if with_coverage:
cmake_flags.append("-DWITH_COVERAGE=1")
@ -373,6 +387,7 @@ def parse_args() -> argparse.Namespace:
"clang-16-aarch64-v80compat", "clang-16-aarch64-v80compat",
"clang-16-ppc64le", "clang-16-ppc64le",
"clang-16-riscv64", "clang-16-riscv64",
"clang-16-s390x",
"clang-16-amd64-compat", "clang-16-amd64-compat",
"clang-16-freebsd", "clang-16-freebsd",
), ),
@ -412,10 +427,18 @@ def parse_args() -> argparse.Namespace:
action="store_true", action="store_true",
help="if set, the build fails on errors writing cache to S3", help="if set, the build fails on errors writing cache to S3",
) )
parser.add_argument(
"--cargo-cache-dir",
default=Path(os.getenv("CARGO_HOME", "") or Path.home() / ".cargo")
/ "registry",
type=dir_name,
help="a directory to preserve the rust cargo crates",
)
parser.add_argument("--force-build-image", action="store_true") parser.add_argument("--force-build-image", action="store_true")
parser.add_argument("--version") parser.add_argument("--version")
parser.add_argument("--official", action="store_true") parser.add_argument("--official", action="store_true")
parser.add_argument("--additional-pkgs", action="store_true") parser.add_argument("--additional-pkgs", action="store_true")
parser.add_argument("--with-profiler", action="store_true")
parser.add_argument("--with-coverage", action="store_true") parser.add_argument("--with-coverage", action="store_true")
parser.add_argument( parser.add_argument(
"--with-binaries", choices=("programs", "tests", ""), default="" "--with-binaries", choices=("programs", "tests", ""), default=""
@ -451,7 +474,7 @@ def parse_args() -> argparse.Namespace:
return args
def main():
def main() -> None:
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
args = parse_args()
@ -479,6 +502,7 @@ def main():
args.version,
args.official,
args.additional_pkgs,
args.with_profiler,
args.with_coverage,
args.with_binaries,
)
@ -490,6 +514,7 @@ def main():
args.output_dir,
env_prepared,
ch_root,
args.cargo_cache_dir,
args.ccache_dir,
)
logging.info("Output placed into %s", args.output_dir)

View File

@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.7.3.14"
ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.

View File

@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable" ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.7.3.14" ARG VERSION="23.7.4.5"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image # set non-empty deb_location_url url to create a docker image

View File

@ -35,4 +35,7 @@ ENV LC_ALL en_US.UTF-8
ENV TZ=Europe/Amsterdam
RUN ln -snf "/usr/share/zoneinfo/$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
# This script is used to setup realtime export of server logs from the CI into external ClickHouse cluster:
COPY setup_export_logs.sh /
CMD sleep 1

View File

@ -0,0 +1,65 @@
#!/bin/bash
# This script sets up export of system log tables to a remote server.
# Remote tables are created if not exist, and augmented with extra columns,
# and their names will contain a hash of the table structure,
# which allows exporting tables from servers of different versions.
# Pre-configured destination cluster, where to export the data
CLUSTER=${CLUSTER:=system_logs_export}
EXTRA_COLUMNS=${EXTRA_COLUMNS:="pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:="0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:="check_name, "}
CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}
# Create all configured system logs:
clickhouse-client --query "SYSTEM FLUSH LOGS"
# It doesn't make sense to try creating tables if SYNC fails
echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client --receive_timeout 180 $CONNECTION_PARAMETERS || exit 0
# For each system log table:
clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
do
# Calculate hash of its structure:
hash=$(clickhouse-client --query "
SELECT sipHash64(groupArray((name, type)))
FROM (SELECT name, type FROM system.columns
WHERE database = 'system' AND table = '$table'
ORDER BY position)
")
# Create the destination table with adapted name and structure:
statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
s/^\($/('"$EXTRA_COLUMNS"'/;
s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
/^TTL /d
')
echo "Creating destination table ${table}_${hash}" >&2
echo "$statement" | clickhouse-client --distributed_ddl_task_timeout=10 $CONNECTION_PARAMETERS || continue
echo "Creating table system.${table}_sender" >&2
# Create Distributed table and materialized view to watch on the original table:
clickhouse-client --query "
CREATE TABLE system.${table}_sender
ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
SETTINGS flush_on_detach=0
EMPTY AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"
echo "Creating materialized view system.${table}_watcher" >&2
clickhouse-client --query "
CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
FROM system.${table}
"
done
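The script above derives the destination table name from a hash of the source table's structure, so servers of different versions export into distinct tables instead of clashing on schema differences. The following is a rough, hypothetical Python sketch of the same idea; it hashes the column list locally with SHA-256, whereas the script asks the server for `sipHash64(groupArray((name, type)))`.

```python
import hashlib

def destination_table_name(table: str, columns) -> str:
    # Hash the ordered (name, type) pairs, mirroring the structure hash
    # that the shell script computes on the server side.
    structure = ",".join(f"{name} {type_}" for name, type_ in columns)
    digest = hashlib.sha256(structure.encode()).hexdigest()[:16]
    return f"{table}_{digest}"

# Servers whose query_log has identical columns agree on the destination name;
# a server with an extra or changed column exports into a different table.
print(destination_table_name("query_log", [("event_time", "DateTime"), ("query", "String")]))
```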

View File

@ -148,6 +148,7 @@ function clone_submodules
contrib/liburing
contrib/libfiu
contrib/incbin
contrib/yaml-cpp
)
git submodule sync
@ -170,6 +171,7 @@ function run_cmake
"-DENABLE_SIMDJSON=1" "-DENABLE_SIMDJSON=1"
"-DENABLE_JEMALLOC=1" "-DENABLE_JEMALLOC=1"
"-DENABLE_LIBURING=1" "-DENABLE_LIBURING=1"
"-DENABLE_YAML_CPP=1"
) )
export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache" export CCACHE_DIR="$FASTTEST_WORKSPACE/ccache"

View File

@ -122,6 +122,23 @@ EOL
<core_path>$PWD</core_path> <core_path>$PWD</core_path>
</clickhouse> </clickhouse>
EOL EOL
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
port: 9440
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > db/config.d/system_logs_export.yaml
fi
} }
function filter_exists_and_template function filter_exists_and_template
@ -223,7 +240,22 @@ quit
done done
clickhouse-client --query "select 1" # This checks that the server is responding clickhouse-client --query "select 1" # This checks that the server is responding
kill -0 $server_pid # This checks that it is our server that is started and not some other one kill -0 $server_pid # This checks that it is our server that is started and not some other one
echo Server started and responded echo 'Server started and responded'
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PR_TO_TEST AS pull_request_number, '$SHA_TO_TEST' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
/setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric. # SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
# SC2046: Quote this to prevent word splitting. Actually I need word splitting. # SC2046: Quote this to prevent word splitting. Actually I need word splitting.
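As a sketch (not part of the CI scripts), these are the kind of checks one could run after `/setup_export_logs.sh` finishes, assuming `CLICKHOUSE_CI_LOGS_HOST` was set and the cluster config above was written:

```bash
# The export cluster from the YAML snippet should be visible to the server...
clickhouse-client --query "
    SELECT cluster, host_name, port
    FROM system.clusters
    WHERE cluster = 'system_logs_export'
"
# ...and every system *_log table should have received a _sender/_watcher pair
clickhouse-client --query "
    SELECT name, engine FROM system.tables
    WHERE database = 'system'
      AND (name LIKE '%\\_sender' OR name LIKE '%\\_watcher')
"
```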

View File

@ -12,6 +12,7 @@ ENV \
# install systemd packages # install systemd packages
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
sudo \
systemd \ systemd \
&& \ && \
apt-get clean && \ apt-get clean && \

View File

@ -2,7 +2,7 @@ version: "2.3"
services: services:
coredns: coredns:
image: coredns/coredns:latest image: coredns/coredns:1.9.3 # :latest broke this test
restart: always restart: always
volumes: volumes:
- ${COREDNS_CONFIG_DIR}/example.com:/example.com - ${COREDNS_CONFIG_DIR}/example.com:/example.com

View File

@ -1,18 +1,7 @@
# docker build -t clickhouse/performance-comparison . # docker build -t clickhouse/performance-comparison .
# Using ubuntu:22.04 over 20.04 as all other images, since: ARG FROM_TAG=latest
# a) ubuntu 20.04 has too old parallel, and does not support --memsuspend FROM clickhouse/test-base:$FROM_TAG
# b) anyway for perf tests it should not be important (backward compatiblity
# with older ubuntu had been checked lots of times in various tests)
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
ENV LANG=C.UTF-8
ENV TZ=Europe/Amsterdam
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
RUN apt-get update \ RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \ && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
@ -56,10 +45,9 @@ COPY * /
# node #0 should be less stable because of system interruptions. We bind # node #0 should be less stable because of system interruptions. We bind
# randomly to node 1 or 0 to gather some statistics on that. We have to bind # randomly to node 1 or 0 to gather some statistics on that. We have to bind
# both servers and the tmpfs on which the database is stored. How to do it # both servers and the tmpfs on which the database is stored. How to do it
# through Yandex Sandbox API is unclear, but by default tmpfs uses # is unclear, but by default tmpfs uses
# 'process allocation policy', not sure which process but hopefully the one that # 'process allocation policy', not sure which process but hopefully the one that
# writes to it, so just bind the downloader script as well. We could also try to # writes to it, so just bind the downloader script as well.
# remount it with proper options in Sandbox task.
# https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt # https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt
# Double-escaped backslashes are a tribute to the engineering wonder of docker -- # Double-escaped backslashes are a tribute to the engineering wonder of docker --
# it gives '/bin/sh: 1: [bash,: not found' otherwise. # it gives '/bin/sh: 1: [bash,: not found' otherwise.

View File

@ -90,7 +90,7 @@ function configure
set +m set +m
wait_for_server $LEFT_SERVER_PORT $left_pid wait_for_server $LEFT_SERVER_PORT $left_pid
echo Server for setup started echo "Server for setup started"
clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||: clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||: clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:
@ -156,9 +156,9 @@ function restart
wait_for_server $RIGHT_SERVER_PORT $right_pid wait_for_server $RIGHT_SERVER_PORT $right_pid
echo right ok echo right ok
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'" clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options" clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'" clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database NOT IN ('system', 'INFORMATION_SCHEMA', 'information_schema')"
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options" clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"
# Check again that both servers we started are running -- this is important # Check again that both servers we started are running -- this is important
@ -352,14 +352,12 @@ function get_profiles
wait wait
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: & clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: & clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: & clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: & clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: & clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: & clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type in ('QueryFinish', 'ExceptionWhileProcessing') format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: & clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: & clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: & clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
@ -665,9 +663,8 @@ create view partial_query_times as select * from
-- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g. -- Report for backward-incompatible ('partial') queries that we could only run on the new server (e.g.
-- queries with new functions added in the tested PR). -- queries with new functions added in the tested PR).
create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv') create table partial_queries_report engine File(TSV, 'report/partial-queries-report.tsv')
settings output_format_decimal_trailing_zeros = 1 as select round(time_median, 3) time,
as select toDecimal64(time_median, 3) time, round(time_stddev / time_median, 3) relative_time_stddev,
toDecimal64(time_stddev / time_median, 3) relative_time_stddev,
test, query_index, query_display_name test, query_index, query_display_name
from partial_query_times from partial_query_times
join query_display_names using (test, query_index) join query_display_names using (test, query_index)
@ -739,28 +736,26 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
; ;
create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv')
settings output_format_decimal_trailing_zeros = 1
as with as with
-- server_time is sometimes reported as zero (if it's less than 1 ms), -- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion -- so we have to work around this to not get an error about conversion
-- of NaN to decimal. -- of NaN to decimal.
(left > right ? left / right : right / left) as times_change_float, (left > right ? left / right : right / left) as times_change_float,
isFinite(times_change_float) as times_change_finite, isFinite(times_change_float) as times_change_finite,
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal, round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite times_change_finite
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x' ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
: '--' as times_change_str : '--' as times_change_str
select select
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str, round(left, 3), round(right, 3), times_change_str,
toDecimal64(diff, 3), toDecimal64(stat_threshold, 3), round(diff, 3), round(stat_threshold, 3),
changed_fail, test, query_index, query_display_name changed_fail, test, query_index, query_display_name
from queries where changed_show order by abs(diff) desc; from queries where changed_show order by abs(diff) desc;
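The switch from `toDecimal64(x, 3)` to `round(x, 3)` in these report queries matters because the inputs can be NaN or infinite, which is exactly what the `times_change_finite` guard above works around. A minimal sketch of the difference (the exact error text varies by server version):

```bash
# round() passes NaN/Inf through unchanged...
clickhouse-client --query "SELECT round(nan, 3), round(1/0, 3)"
# ...while converting them to Decimal throws an exception
clickhouse-client --query "SELECT toDecimal64(nan, 3)" || echo "conversion to Decimal failed as expected"
```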
create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv') create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as select as select
toDecimal64(left, 3), toDecimal64(right, 3), toDecimal64(diff, 3), round(left, 3), round(right, 3), round(diff, 3),
toDecimal64(stat_threshold, 3), unstable_fail, test, query_index, query_display_name round(stat_threshold, 3), unstable_fail, test, query_index, query_display_name
from queries where unstable_show order by stat_threshold desc; from queries where unstable_show order by stat_threshold desc;
@ -789,11 +784,10 @@ create view total_speedup as
; ;
create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv') create table test_perf_changes_report engine File(TSV, 'report/test-perf-changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as with as with
(times_speedup >= 1 (times_speedup >= 1
? '-' || toString(toDecimal64(times_speedup, 3)) || 'x' ? '-' || toString(round(times_speedup, 3)) || 'x'
: '+' || toString(toDecimal64(1 / times_speedup, 3)) || 'x') : '+' || toString(round(1 / times_speedup, 3)) || 'x')
as times_speedup_str as times_speedup_str
select test, times_speedup_str, queries, bad, changed, unstable select test, times_speedup_str, queries, bad, changed, unstable
-- Not sure what's the precedence of UNION ALL vs WHERE & ORDER BY, hence all -- Not sure what's the precedence of UNION ALL vs WHERE & ORDER BY, hence all
@ -817,11 +811,10 @@ create view total_client_time_per_query as select *
'test text, query_index int, client float, server float'); 'test text, query_index int, client float, server float');
create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv') create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
settings output_format_decimal_trailing_zeros = 1 as select client, server, round(client/server, 3) p,
as select client, server, toDecimal64(client/server, 3) p,
test, query_display_name test, query_display_name
from total_client_time_per_query left join query_display_names using (test, query_index) from total_client_time_per_query left join query_display_names using (test, query_index)
where p > toDecimal64(1.02, 3) order by p desc; where p > round(1.02, 3) order by p desc;
create table wall_clock_time_per_test engine Memory as select * create table wall_clock_time_per_test engine Memory as select *
from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float'); from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');
@ -899,15 +892,14 @@ create view test_times_view_total as
; ;
create table test_times_report engine File(TSV, 'report/test-times.tsv') create table test_times_report engine File(TSV, 'report/test-times.tsv')
settings output_format_decimal_trailing_zeros = 1
as select as select
test, test,
toDecimal64(real, 3), round(real, 3),
toDecimal64(total_client_time, 3), round(total_client_time, 3),
queries, queries,
toDecimal64(query_max, 3), round(query_max, 3),
toDecimal64(avg_real_per_query, 3), round(avg_real_per_query, 3),
toDecimal64(query_min, 3), round(query_min, 3),
runs runs
from ( from (
select * from test_times_view select * from test_times_view
@ -919,21 +911,20 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv')
-- report for all queries page, only main metric -- report for all queries page, only main metric
create table all_tests_report engine File(TSV, 'report/all-queries.tsv') create table all_tests_report engine File(TSV, 'report/all-queries.tsv')
settings output_format_decimal_trailing_zeros = 1
as with as with
-- server_time is sometimes reported as zero (if it's less than 1 ms), -- server_time is sometimes reported as zero (if it's less than 1 ms),
-- so we have to work around this to not get an error about conversion -- so we have to work around this to not get an error about conversion
-- of NaN to decimal. -- of NaN to decimal.
(left > right ? left / right : right / left) as times_change_float, (left > right ? left / right : right / left) as times_change_float,
isFinite(times_change_float) as times_change_finite, isFinite(times_change_float) as times_change_finite,
toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal, round(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
times_change_finite times_change_finite
? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x' ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
: '--' as times_change_str : '--' as times_change_str
select changed_fail, unstable_fail, select changed_fail, unstable_fail,
toDecimal64(left, 3), toDecimal64(right, 3), times_change_str, round(left, 3), round(right, 3), times_change_str,
toDecimal64(isFinite(diff) ? diff : 0, 3), round(isFinite(diff) ? diff : 0, 3),
toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3), round(isFinite(stat_threshold) ? stat_threshold : 0, 3),
test, query_index, query_display_name test, query_index, query_display_name
from queries order by test, query_index; from queries order by test, query_index;
@ -1044,27 +1035,6 @@ create table unstable_run_traces engine File(TSVWithNamesAndTypes,
order by count() desc order by count() desc
; ;
create table metric_devation engine File(TSVWithNamesAndTypes,
'report/metric-deviation.$version.tsv')
settings output_format_decimal_trailing_zeros = 1
-- first goes the key used to split the file with grep
as select test, query_index, query_display_name,
toDecimal64(d, 3) d, q, metric
from (
select
test, query_index,
(q[3] - q[1])/q[2] d,
quantilesExact(0, 0.5, 1)(value) q, metric
from (select * from unstable_run_metrics
union all select * from unstable_run_traces
union all select * from unstable_run_metrics_2) mm
group by test, query_index, metric
having isFinite(d) and d > 0.5 and q[3] > 5
) metrics
left join query_display_names using (test, query_index)
order by test, query_index, d desc
;
create table stacks engine File(TSV, 'report/stacks.$version.tsv') as create table stacks engine File(TSV, 'report/stacks.$version.tsv') as
select select
-- first goes the key used to split the file with grep -- first goes the key used to split the file with grep
@ -1173,9 +1143,8 @@ create table metrics engine File(TSV, 'metrics/metrics.tsv') as
-- Show metrics that have changed -- Show metrics that have changed
create table changes engine File(TSV, 'metrics/changes.tsv') create table changes engine File(TSV, 'metrics/changes.tsv')
settings output_format_decimal_trailing_zeros = 1
as select metric, left, right, as select metric, left, right,
toDecimal64(diff, 3), toDecimal64(times_diff, 3) round(diff, 3), round(times_diff, 3)
from ( from (
select metric, median(left) as left, median(right) as right, select metric, median(left) as left, median(right) as right,
(right - left) / left diff, (right - left) / left diff,
@ -1226,7 +1195,6 @@ create table ci_checks engine File(TSVWithNamesAndTypes, 'ci-checks.tsv')
'$SHA_TO_TEST' :: LowCardinality(String) AS commit_sha, '$SHA_TO_TEST' :: LowCardinality(String) AS commit_sha,
'${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME:-Performance}' :: LowCardinality(String) AS check_name, '${CLICKHOUSE_PERFORMANCE_COMPARISON_CHECK_NAME:-Performance}' :: LowCardinality(String) AS check_name,
'$(sed -n 's/.*<!--status: \(.*\)-->/\1/p' report.html)' :: LowCardinality(String) AS check_status, '$(sed -n 's/.*<!--status: \(.*\)-->/\1/p' report.html)' :: LowCardinality(String) AS check_status,
-- TODO toDateTime() can't parse output of 'date', so no time for now.
(($(date +%s) - $CHPC_CHECK_START_TIMESTAMP) * 1000) :: UInt64 AS check_duration_ms, (($(date +%s) - $CHPC_CHECK_START_TIMESTAMP) * 1000) :: UInt64 AS check_duration_ms,
fromUnixTimestamp($CHPC_CHECK_START_TIMESTAMP) check_start_time, fromUnixTimestamp($CHPC_CHECK_START_TIMESTAMP) check_start_time,
test_name :: LowCardinality(String) AS test_name , test_name :: LowCardinality(String) AS test_name ,

View File

@ -19,31 +19,6 @@
<opentelemetry_span_log remove="remove"/> <opentelemetry_span_log remove="remove"/>
<session_log remove="remove"/> <session_log remove="remove"/>
<!-- performance tests does not uses real block devices,
instead they stores everything in memory.
And so, to avoid extra memory reference switch *_log to Memory engine. -->
<query_log>
<engine>ENGINE = Memory</engine>
<partition_by remove="remove"/>
</query_log>
<query_thread_log>
<engine>ENGINE = Memory</engine>
<partition_by remove="remove"/>
</query_thread_log>
<trace_log>
<engine>ENGINE = Memory</engine>
<partition_by remove="remove"/>
</trace_log>
<metric_log>
<engine>ENGINE = Memory</engine>
<partition_by remove="remove"/>
</metric_log>
<asynchronous_metric_log>
<engine>ENGINE = Memory</engine>
<partition_by remove="remove"/>
</asynchronous_metric_log>
<uncompressed_cache_size>1000000000</uncompressed_cache_size> <uncompressed_cache_size>1000000000</uncompressed_cache_size>
<asynchronous_metrics_update_period_s>10</asynchronous_metrics_update_period_s> <asynchronous_metrics_update_period_s>10</asynchronous_metrics_update_period_s>

View File

@ -19,8 +19,9 @@
<max_threads>12</max_threads> <max_threads>12</max_threads>
<!-- disable JIT for perf tests --> <!-- disable JIT for perf tests -->
<compile_expressions>0</compile_expressions> <compile_expressions>1</compile_expressions>
<compile_aggregate_expressions>0</compile_aggregate_expressions> <compile_aggregate_expressions>1</compile_aggregate_expressions>
<compile_sort_description>1</compile_sort_description>
<!-- Don't fail some prewarm queries too early --> <!-- Don't fail some prewarm queries too early -->
<timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed> <timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed>

View File

@ -31,8 +31,6 @@ function download
# Test all of them. # Test all of them.
declare -a urls_to_try=( declare -a urls_to_try=(
"$S3_URL/PRs/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst" "$S3_URL/PRs/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
"$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tar.zst"
"$S3_URL/$left_pr/$left_sha/$BUILD_NAME/performance.tgz"
) )
for path in "${urls_to_try[@]}" for path in "${urls_to_try[@]}"

View File

@ -130,7 +130,7 @@ then
git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt git -C right/ch diff --name-only "$base" pr -- :!tests/performance :!docker/test/performance-comparison | tee other-changed-files.txt
fi fi
# Set python output encoding so that we can print queries with Russian letters. # Set python output encoding so that we can print queries with non-ASCII letters.
export PYTHONIOENCODING=utf-8 export PYTHONIOENCODING=utf-8
# By default, use the main comparison script from the tested package, so that we # By default, use the main comparison script from the tested package, so that we
@ -151,11 +151,7 @@ export PATH
export REF_PR export REF_PR
export REF_SHA export REF_SHA
# Try to collect some core dumps. I've seen two patterns in Sandbox: # Try to collect some core dumps.
# 1) |/home/zomb-sandbox/venv/bin/python /home/zomb-sandbox/client/sandbox/bin/coredumper.py %e %p %g %u %s %P %c
# Not sure what this script does (puts them to sandbox resources, logs some messages?),
# and it's not accessible from inside docker anyway.
# 2) something like %e.%p.core.dmp. The dump should end up in the workspace directory.
# At least we remove the ulimit and then try to pack some common file names into output. # At least we remove the ulimit and then try to pack some common file names into output.
ulimit -c unlimited ulimit -c unlimited
cat /proc/sys/kernel/core_pattern cat /proc/sys/kernel/core_pattern

View File

@ -1,4 +1,5 @@
#!/bin/bash #!/bin/bash
set -exu set -exu
trap "exit" INT TERM trap "exit" INT TERM

View File

@ -0,0 +1,30 @@
# docker build -t clickhouse/sqltest .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG
RUN apt-get update --yes \
&& env DEBIAN_FRONTEND=noninteractive \
apt-get install --yes --no-install-recommends \
wget \
git \
python3 \
python3-dev \
python3-pip \
sudo \
&& apt-get clean
RUN pip3 install \
pyyaml \
clickhouse-driver
ARG sqltest_repo="https://github.com/elliotchance/sqltest/"
RUN git clone ${sqltest_repo}
ENV TZ=UTC
ENV MAX_RUN_TIME=900
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
COPY run.sh /
COPY test.py /
CMD ["/bin/bash", "/run.sh"]
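A hedged sketch of running this image locally; in CI it is driven by the scripts in `tests/ci`, and the PR number, commit SHA, and output mount below are placeholders rather than the actual CI invocation:

```bash
# Build the image from the repository root (requires the clickhouse/test-base image)
docker build -t clickhouse/sqltest docker/test/sqltest

# run.sh downloads a build for the given PR/SHA and writes report.html, test.log
# and the server logs into /workspace
docker run --rm \
    -e PR_TO_TEST=0 \
    -e SHA_TO_TEST=0000000000000000000000000000000000000000 \
    -v "$PWD/sqltest_output:/workspace" \
    clickhouse/sqltest
```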

51
docker/test/sqltest/run.sh Executable file
View File

@ -0,0 +1,51 @@
#!/bin/bash
# shellcheck disable=SC2015
set -x
set -e
set -u
set -o pipefail
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-16_debug_none_unsplitted_disable_False_binary"}
BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}
function wget_with_retry
{
for _ in 1 2 3 4; do
if wget -nv -nd -c "$1";then
return 0
else
sleep 0.5
fi
done
return 1
}
wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
chmod +x clickhouse
./clickhouse install --noninteractive
echo "
users:
default:
access_management: 1" > /etc/clickhouse-server/users.d/access_management.yaml
clickhouse start
# Wait for start
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break ||:
sleep 1
done
# Run the test
pushd sqltest/standards/2016/
/test.py
mv report.html test.log /workspace
popd
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.log
zstd --threads=0 /var/log/clickhouse-server/clickhouse-server.err.log
mv /var/log/clickhouse-server/clickhouse-server.log.zst /var/log/clickhouse-server/clickhouse-server.err.log.zst /workspace

148
docker/test/sqltest/test.py Executable file
View File

@ -0,0 +1,148 @@
#!/usr/bin/env python3
import os
import yaml
import html
import random
import string
from clickhouse_driver import Client
client = Client(host="localhost", port=9000)
settings = {
"default_table_engine": "Memory",
"union_default_mode": "DISTINCT",
"calculate_text_stack_trace": 0,
}
database_name = "sqltest_" + "".join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
client.execute(f"DROP DATABASE IF EXISTS {database_name}", settings=settings)
client.execute(f"CREATE DATABASE {database_name}", settings=settings)
client = Client(host="localhost", port=9000, database=database_name)
summary = {"success": 0, "total": 0, "results": {}}
log_file = open("test.log", "w")
report_html_file = open("report.html", "w")
with open("features.yml", "r") as file:
yaml_content = yaml.safe_load(file)
for category in yaml_content:
log_file.write(category.capitalize() + " features:\n")
summary["results"][category] = {"success": 0, "total": 0, "results": {}}
for test in yaml_content[category]:
log_file.write(test + ": " + yaml_content[category][test] + "\n")
summary["results"][category]["results"][test] = {
"success": 0,
"total": 0,
"description": yaml_content[category][test],
}
test_path = test[0] + "/" + test + ".tests.yml"
if os.path.exists(test_path):
with open(test_path, "r") as test_file:
test_yaml_content = yaml.load_all(test_file, Loader=yaml.FullLoader)
for test_case in test_yaml_content:
queries = test_case["sql"]
if not isinstance(queries, list):
queries = [queries]
for query in queries:
# Example: E011-01
test_group = ""
if "-" in test:
test_group = test.split("-", 1)[0]
summary["results"][category]["results"][test_group][
"total"
] += 1
summary["results"][category]["results"][test]["total"] += 1
summary["results"][category]["total"] += 1
summary["total"] += 1
log_file.write(query + "\n")
try:
result = client.execute(query, settings=settings)
log_file.write(str(result) + "\n")
if test_group:
summary["results"][category]["results"][test_group][
"success"
] += 1
summary["results"][category]["results"][test][
"success"
] += 1
summary["results"][category]["success"] += 1
summary["success"] += 1
except Exception as e:
log_file.write(f"Error occurred: {str(e)}\n")
client.execute(f"DROP DATABASE {database_name}", settings=settings)
def enable_color(ratio):
if ratio == 0:
return "<b style='color: red;'>"
elif ratio < 0.5:
return "<b style='color: orange;'>"
elif ratio < 1:
return "<b style='color: gray;'>"
else:
return "<b style='color: green;'>"
reset_color = "</b>"
def print_ratio(indent, name, success, total, description):
report_html_file.write(
"{}{}: {}{} / {} ({:.1%}){}{}\n".format(
" " * indent,
name.capitalize(),
enable_color(success / total),
success,
total,
success / total,
reset_color,
f" - " + html.escape(description) if description else "",
)
)
report_html_file.write(
"<html><body><pre style='font-size: 16pt; padding: 1em; line-height: 1.25;'>\n"
)
print_ratio(0, "Total", summary["success"], summary["total"], "")
for category in summary["results"]:
cat_summary = summary["results"][category]
if cat_summary["total"] == 0:
continue
print_ratio(2, category, cat_summary["success"], cat_summary["total"], "")
for test in summary["results"][category]["results"]:
test_summary = summary["results"][category]["results"][test]
if test_summary["total"] == 0:
continue
print_ratio(
6 if "-" in test else 4,
test,
test_summary["success"],
test_summary["total"],
test_summary["description"],
)
report_html_file.write("</pre></body></html>\n")

View File

@ -20,6 +20,22 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log & azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateful ./setup_minio.sh stateful
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
function start() function start()
{ {
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@ -65,6 +81,22 @@ function start()
} }
start start
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# shellcheck disable=SC2086 # No quotes because I want to split it into words. # shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS /s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse chmod 777 -R /var/lib/clickhouse

View File

@ -87,4 +87,5 @@ RUN npm install -g azurite \
COPY run.sh / COPY run.sh /
COPY setup_minio.sh / COPY setup_minio.sh /
COPY setup_hdfs_minicluster.sh / COPY setup_hdfs_minicluster.sh /
CMD ["/bin/bash", "/run.sh"] CMD ["/bin/bash", "/run.sh"]

View File

@ -36,6 +36,22 @@ fi
./setup_minio.sh stateless ./setup_minio.sh stateless
./setup_hdfs_minicluster.sh ./setup_hdfs_minicluster.sh
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
# For flaky check we also enable thread fuzzer # For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then if [ "$NUM_TRIES" -gt "1" ]; then
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000 export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
@ -92,7 +108,28 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited) MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi fi
sleep 5
# Wait for the server to start, but not for too long.
for _ in {1..100}
do
clickhouse-client --query "SELECT 1" && break
sleep 1
done
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01 attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01

View File

@ -51,8 +51,39 @@ configure
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log & azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment ./setup_minio.sh stateless # to have a proper environment
# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
echo "
remote_servers:
system_logs_export:
shard:
replica:
secure: 1
user: ci
host: '${CLICKHOUSE_CI_LOGS_HOST}'
password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
start start
# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
# TODO: Check if the password will appear in the logs.
export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"
./setup_export_logs.sh
# Unset variables after use
export CONNECTION_PARAMETERS=''
export CLICKHOUSE_CI_LOGS_HOST=''
export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
# shellcheck disable=SC2086 # No quotes because I want to split it into words. # shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS /s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
chmod 777 -R /var/lib/clickhouse chmod 777 -R /var/lib/clickhouse
@ -180,6 +211,11 @@ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/cli
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo cat /etc/clickhouse-server/config.d/logger_trace.xml \
| sed "s|<level>trace</level>|<level>test</level>|" \
> /etc/clickhouse-server/config.d/logger_trace.xml.tmp
mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml
start start
stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \ stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \

View File

@ -1,5 +1,5 @@
# docker build -t clickhouse/style-test . # docker build -t clickhouse/style-test .
FROM ubuntu:20.04 FROM ubuntu:22.04
ARG ACT_VERSION=0.2.33 ARG ACT_VERSION=0.2.33
ARG ACTIONLINT_VERSION=1.6.22 ARG ACTIONLINT_VERSION=1.6.22

View File

@ -36,6 +36,9 @@ then
elif [ "${ARCH}" = "riscv64" ] elif [ "${ARCH}" = "riscv64" ]
then then
DIR="riscv64" DIR="riscv64"
elif [ "${ARCH}" = "s390x" ]
then
DIR="s390x"
fi fi
elif [ "${OS}" = "FreeBSD" ] elif [ "${OS}" = "FreeBSD" ]
then then

View File

@ -0,0 +1,45 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.3.9.55-lts (b9c5c8622d3) FIXME as compared to v23.3.8.21-lts (1675f2264f3)
#### Performance Improvement
* Backported in [#52213](https://github.com/ClickHouse/ClickHouse/issues/52213): Do not store blocks in `ANY` hash join if nothing is inserted. [#48633](https://github.com/ClickHouse/ClickHouse/pull/48633) ([vdimir](https://github.com/vdimir)).
* Backported in [#52826](https://github.com/ClickHouse/ClickHouse/issues/52826): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1` . This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823) . This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173) . [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).
#### Build/Testing/Packaging Improvement
* Backported in [#53019](https://github.com/ClickHouse/ClickHouse/issues/53019): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53288](https://github.com/ClickHouse/ClickHouse/issues/53288): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud, the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53461](https://github.com/ClickHouse/ClickHouse/issues/53461): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix optimization to move functions before sorting. [#51481](https://github.com/ClickHouse/ClickHouse/pull/51481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* init and destroy ares channel on demand.. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).

View File

@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.7.4.5-stable (bd2fcd44553) FIXME as compared to v23.7.3.14-stable (bd9a510550c)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).

View File

@ -42,20 +42,20 @@ sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk lsb-rel
### Install and Use the Clang compiler ### Install and Use the Clang compiler
On Ubuntu/Debian you can use LLVM's automatic installation script, see [here](https://apt.llvm.org/). On Ubuntu/Debian, you can use LLVM's automatic installation script; see [here](https://apt.llvm.org/).
``` bash ``` bash
sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
``` ```
Note: in case of troubles, you can also use this: Note: in case of trouble, you can also use this:
```bash ```bash
sudo apt-get install software-properties-common sudo apt-get install software-properties-common
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
``` ```
For other Linux distribution - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html). For other Linux distributions - check the availability of LLVM's [prebuild packages](https://releases.llvm.org/download.html).
As of April 2023, clang-16 or higher will work. As of April 2023, clang-16 or higher will work.
GCC as a compiler is not supported. GCC as a compiler is not supported.
@ -92,8 +92,12 @@ cmake -S . -B build
cmake --build build # or: `cd build; ninja` cmake --build build # or: `cd build; ninja`
``` ```
:::tip
In case `cmake` isn't able to detect the number of available logical cores, the build will be done by one thread. To overcome this, you can tell `cmake` to use a specific number of threads with the `-j` flag, for example, `cmake --build build -j 16`. Alternatively, you can generate the build files with a specific number of jobs in advance to avoid always setting the flag: `cmake -DPARALLEL_COMPILE_JOBS=16 -S . -B build`, where `16` is the desired number of threads.
:::
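For example, both options from the tip, assuming 16 is the desired parallelism:

```bash
# Pass the job count per build invocation...
cmake --build build -j 16

# ...or fix it once when generating the build files
cmake -DPARALLEL_COMPILE_JOBS=16 -S . -B build
cmake --build build
```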
To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`). To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`).
This will create executable `build/programs/clickhouse` which can be used with `client` or `server` arguments. This will create an executable `build/programs/clickhouse`, which can be used with `client` or `server` arguments.
## Building on Any Linux {#how-to-build-clickhouse-on-any-linux} ## Building on Any Linux {#how-to-build-clickhouse-on-any-linux}
@ -107,7 +111,7 @@ The build requires the following components:
- Yasm - Yasm
- Gawk - Gawk
If all the components are installed, you may build in the same way as the steps above. If all the components are installed, you may build it in the same way as the steps above.
Example for OpenSUSE Tumbleweed: Example for OpenSUSE Tumbleweed:
@ -123,7 +127,7 @@ Example for Fedora Rawhide:
``` bash ``` bash
sudo yum update sudo yum update
sudo yum --nogpg install git cmake make clang python3 ccache nasm yasm gawk sudo yum --nogpg install git cmake make clang python3 ccache lld nasm yasm gawk
git clone --recursive https://github.com/ClickHouse/ClickHouse.git git clone --recursive https://github.com/ClickHouse/ClickHouse.git
mkdir build mkdir build
cmake -S . -B build cmake -S . -B build

View File

@ -190,7 +190,7 @@ These are the schema conversion manipulations you can do with table overrides fo
* Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl). * Modify [column TTL](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#mergetree-column-ttl).
* Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs). * Modify [column compression codec](/docs/en/sql-reference/statements/create/table.md/#codecs).
* Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias). * Add [ALIAS columns](/docs/en/sql-reference/statements/create/table.md/#alias).
* Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes) * Add [skipping indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#table_engine-mergetree-data_skipping-indexes). Note that you need to enable the `use_skip_indexes_if_final` setting to make them work (MaterializedMySQL uses `SELECT ... FINAL` by default)
* Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are * Add [projections](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#projections). Note that projection optimizations are
disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here. disabled when using `SELECT ... FINAL` (which MaterializedMySQL does by default), so their utility is limited here.
`INDEX ... TYPE hypothesis` as [described in the v21.12 blog post]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/) `INDEX ... TYPE hypothesis` as [described in the v21.12 blog post]](https://clickhouse.com/blog/en/2021/clickhouse-v21.12-released/)

View File

@ -21,7 +21,7 @@ CREATE TABLE azure_blob_storage_table (name String, value UInt32)
- `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key) - `connection_string|storage_account_url` — connection_string includes account name & key ([Create connection string](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&bc=%2Fazure%2Fstorage%2Fblobs%2Fbreadcrumb%2Ftoc.json#configure-a-connection-string-for-an-azure-storage-account)) or you could also provide the storage account url here and account name & account key as separate parameters (see parameters account_name & account_key)
- `container_name` - Container name - `container_name` - Container name
- `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. - `blobpath` - file path. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings.
- `account_name` - if storage_account_url is used, then account name can be specified here - `account_name` - if storage_account_url is used, then account name can be specified here
- `account_key` - if storage_account_url is used, then account key can be specified here - `account_key` - if storage_account_url is used, then account key can be specified here
- `format` — The [format](/docs/en/interfaces/formats.md) of the file. - `format` — The [format](/docs/en/interfaces/formats.md) of the file.

View File

@ -173,6 +173,7 @@ Similar to GraphiteMergeTree, the Kafka engine supports extended configuration u
<!-- Global configuration options for all tables of Kafka engine type --> <!-- Global configuration options for all tables of Kafka engine type -->
<debug>cgrp</debug> <debug>cgrp</debug>
<auto_offset_reset>smallest</auto_offset_reset> <auto_offset_reset>smallest</auto_offset_reset>
<statistics_interval_ms>600</statistics_interval_ms>
<!-- Configuration specific to topics "logs" and "stats" --> <!-- Configuration specific to topics "logs" and "stats" -->
@ -260,3 +261,4 @@ The number of rows in one Kafka message depends on whether the format is row-bas
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns) - [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- [background_message_broker_schedule_pool_size](../../../operations/server-configuration-parameters/settings.md#background_message_broker_schedule_pool_size) - [background_message_broker_schedule_pool_size](../../../operations/server-configuration-parameters/settings.md#background_message_broker_schedule_pool_size)
- [system.kafka_consumers](../../../operations/system-tables/kafka_consumers.md)

View File

@ -37,7 +37,7 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
### Engine parameters ### Engine parameters
- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path). - `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed. - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file. - `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3). - `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
@ -164,6 +164,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
`path` argument can specify multiple files using bash-like wildcards. For being processed file should exist and match to the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment). `path` argument can specify multiple files using bash-like wildcards. For being processed file should exist and match to the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment).
- `*` — Substitutes any number of any characters except `/` including empty string. - `*` — Substitutes any number of any characters except `/` including empty string.
- `**` — Substitutes any number of any characters including `/` including empty string (see the example after this list). - `**` — Substitutes any number of any characters including `/` including empty string (see the example after this list).
- `?` — Substitutes any single character. - `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`. - `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`. - `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
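For example (hypothetical bucket and layout), `**` lets a single pattern cover every sub-prefix, which `*` alone cannot do since it stops at `/`:

```bash
clickhouse-client --query "
    CREATE TABLE s3_all_parts (name String, value UInt32)
    ENGINE = S3('https://my-bucket.s3.amazonaws.com/data/**/*.csv', 'CSVWithNames')
"
clickhouse-client --query "SELECT count() FROM s3_all_parts"
```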

View File

@ -27,7 +27,7 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
**Engine parameters**

- `path` — Bucket url with path to file. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` — numbers, `'abc'`, `'def'` — strings. For more information see [below](#wildcards-in-path).
- `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
- `format` — The [format](../../../interfaces/formats.md#formats) of the file.
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
@ -213,6 +213,7 @@ For more information about virtual columns see [here](../../../engines/table-eng
`path` argument can specify multiple files using bash-like wildcards. To be processed, a file must exist and match the whole path pattern. Listing of files is determined during `SELECT` (not at `CREATE` moment).

- `*` — Substitutes any number of any characters except `/`, including the empty string.
- `**` — Substitutes any number of any characters including `/`, including the empty string.
- `?` — Substitutes any single character.
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
- `{N..M}` — Substitutes any number in range from N to M including both borders. N and M can have leading zeroes e.g. `000..078`.
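
As an illustration, a hedged sketch of an S3Queue table that consumes files matched by the `**` wildcard; the bucket URL is hypothetical and the `mode` setting is shown only as a typical choice, not as required syntax:

```sql
-- Hypothetical bucket; NOSIGN skips request signing as described above.
CREATE TABLE s3_queue_imports (name String, value UInt32)
    ENGINE = S3Queue('https://my-bucket.s3.amazonaws.com/incoming/**/*.jsonl', NOSIGN, 'JSONEachRow')
    SETTINGS mode = 'unordered';
```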


@ -1,4 +1,4 @@
# Approximate Nearest Neighbor Search Indexes [experimental]

Nearest neighbor search is the problem of finding the M closest points for a given point in an N-dimensional vector space. The most
straightforward approach to solve this problem is a brute force search where the distance between all points in the vector space and the
@ -17,7 +17,7 @@ In terms of SQL, the nearest neighborhood problem can be expressed as follows:
``` sql
SELECT *
FROM table_with_ann_index
ORDER BY Distance(vectors, Point)
LIMIT N
```
@ -32,7 +32,7 @@ An alternative formulation of the nearest neighborhood search problem looks as f
``` sql
SELECT *
FROM table_with_ann_index
WHERE Distance(vectors, Point) < MaxDistance
LIMIT N
```
@ -45,12 +45,12 @@ With brute force search, both queries are expensive (linear in the number of poi
`Point` must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation
of the search space (using clustering, search trees, etc.) which allows computing an approximate answer much quicker (in sub-linear time).

# Creating and Using ANN Indexes {#creating_using_ann_indexes}

Syntax to create an ANN index over an [Array](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table_with_ann_index
(
    `id` Int64,
    `vectors` Array(Float32),
@ -63,7 +63,7 @@ ORDER BY id;
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:

```sql
CREATE TABLE table_with_ann_index
(
    `id` Int64,
    `vectors` Tuple(Float32[, Float32[, ...]]),
@ -83,7 +83,7 @@ ANN indexes support two types of queries:
``` sql
SELECT *
FROM table_with_ann_index
[WHERE ...]
ORDER BY Distance(vectors, Point)
LIMIT N
@ -93,7 +93,7 @@ ANN indexes support two types of queries:
``` sql
SELECT *
FROM table_with_ann_index
WHERE Distance(vectors, Point) < MaxDistance
LIMIT N
```
@ -103,7 +103,7 @@ To avoid writing out large vectors, you can use [query
parameters](/docs/en/interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g.

```bash
clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_index WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0"
```
:::
@ -138,7 +138,7 @@ back to a smaller `GRANULARITY` values only in case of problems like excessive m
was specified for ANN indexes, the default value is 100 million.

# Available ANN Indexes {#available_ann_indexes}

- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy)
@ -165,7 +165,7 @@ space in random linear surfaces (lines in 2D, planes in 3D etc.).
Syntax to create an Annoy index over an [Array](../../../sql-reference/data-types/array.md) column:

```sql
CREATE TABLE table_with_annoy_index
(
    id Int64,
    vectors Array(Float32),
@ -178,7 +178,7 @@ ORDER BY id;
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:

```sql
CREATE TABLE table_with_annoy_index
(
    id Int64,
    vectors Tuple(Float32[, Float32[, ...]]),
@ -188,23 +188,17 @@ ENGINE = MergeTree
ORDER BY id;
```
Annoy currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
  ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
  ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).

For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
distance function was specified during index creation, `L2Distance` is used as default.

Parameter `NumTrees` is the number of trees which the algorithm creates (default if not specified: 100). Higher values of `NumTree` mean
more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes.
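
A sketch putting the two parameters together (the table and index names are illustrative, and the `annoy(Distance, NumTrees)` argument order follows the description above):

```sql
CREATE TABLE table_with_annoy_index
(
    id Int64,
    vectors Array(Float32),
    -- cosineDistance for non-normalized data, 200 trees for higher accuracy at the cost of index size
    INDEX ann_idx vectors TYPE annoy('cosineDistance', 200)
)
ENGINE = MergeTree
ORDER BY id;
```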
:::note
Indexes over columns of type `Array` will generally work faster than indexes on `Tuple` columns. All arrays **must** have the same length. Use


@ -323,9 +323,9 @@ clickhouse-client clickhouse://192.168.1.15,192.168.1.25
`clickhouse-client` uses the first existing file of the following:

- Defined in the `--config-file` parameter.
- `./clickhouse-client.xml`, `.yaml`, `.yml`
- `~/.clickhouse-client/config.xml`, `.yaml`, `.yml`
- `/etc/clickhouse-client/config.xml`, `.yaml`, `.yml`

Example of a config file:
@ -342,6 +342,17 @@ Example of a config file:
</config>
```
Or the same config in a YAML format:
```yaml
user: username
password: 'password'
secure: true
openSSL:
  client:
    caConfig: '/etc/ssl/cert.pem'
```
### Query ID Format {#query-id-format}

In interactive mode `clickhouse-client` shows query ID for every query. By default, the ID is formatted like this:


@ -11,82 +11,83 @@ results of a `SELECT`, and to perform `INSERT`s into a file-backed table.
The supported formats are:
| Format | Input | Output |
|--------|-------|--------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
| [TabSeparatedRaw](#tabseparatedraw) | ✔ | ✔ |
| [TabSeparatedWithNames](#tabseparatedwithnames) | ✔ | ✔ |
| [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes) | ✔ | ✔ |
| [TabSeparatedRawWithNames](#tabseparatedrawwithnames) | ✔ | ✔ |
| [TabSeparatedRawWithNamesAndTypes](#tabseparatedrawwithnamesandtypes) | ✔ | ✔ |
| [Template](#format-template) | ✔ | ✔ |
| [TemplateIgnoreSpaces](#templateignorespaces) | ✔ | ✗ |
| [CSV](#csv) | ✔ | ✔ |
| [CSVWithNames](#csvwithnames) | ✔ | ✔ |
| [CSVWithNamesAndTypes](#csvwithnamesandtypes) | ✔ | ✔ |
| [CustomSeparated](#format-customseparated) | ✔ | ✔ |
| [CustomSeparatedWithNames](#customseparatedwithnames) | ✔ | ✔ |
| [CustomSeparatedWithNamesAndTypes](#customseparatedwithnamesandtypes) | ✔ | ✔ |
| [SQLInsert](#sqlinsert) | ✗ | ✔ |
| [Values](#data-format-values) | ✔ | ✔ |
| [Vertical](#vertical) | ✗ | ✔ |
| [JSON](#json) | ✔ | ✔ |
| [JSONAsString](#jsonasstring) | ✔ | ✗ |
| [JSONStrings](#jsonstrings) | ✔ | ✔ |
| [JSONColumns](#jsoncolumns) | ✔ | ✔ |
| [JSONColumnsWithMetadata](#jsoncolumnsmonoblock) | ✔ | ✔ |
| [JSONCompact](#jsoncompact) | ✔ | ✔ |
| [JSONCompactStrings](#jsoncompactstrings) | ✗ | ✔ |
| [JSONCompactColumns](#jsoncompactcolumns) | ✔ | ✔ |
| [JSONEachRow](#jsoneachrow) | ✔ | ✔ |
| [PrettyJSONEachRow](#prettyjsoneachrow) | ✗ | ✔ |
| [JSONEachRowWithProgress](#jsoneachrowwithprogress) | ✗ | ✔ |
| [JSONStringsEachRow](#jsonstringseachrow) | ✔ | ✔ |
| [JSONStringsEachRowWithProgress](#jsonstringseachrowwithprogress) | ✗ | ✔ |
| [JSONCompactEachRow](#jsoncompacteachrow) | ✔ | ✔ |
| [JSONCompactEachRowWithNames](#jsoncompacteachrowwithnames) | ✔ | ✔ |
| [JSONCompactEachRowWithNamesAndTypes](#jsoncompacteachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONCompactStringsEachRow](#jsoncompactstringseachrow) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNames](#jsoncompactstringseachrowwithnames) | ✔ | ✔ |
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [JSONObjectEachRow](#jsonobjecteachrow) | ✔ | ✔ |
| [BSONEachRow](#bsoneachrow) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
| [PrettySpace](#prettyspace) | ✗ | ✔ |
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
| [Prometheus](#prometheus) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
| [Avro](#data-format-avro) | ✔ | ✔ |
| [AvroConfluent](#data-format-avro-confluent) | ✔ | ✗ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [ParquetMetadata](#data-format-parquet-metadata) | ✔ | ✗ |
| [Arrow](#data-format-arrow) | ✔ | ✔ |
| [ArrowStream](#data-format-arrow-stream) | ✔ | ✔ |
| [ORC](#data-format-orc) | ✔ | ✔ |
| [One](#data-format-one) | ✔ | ✗ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNames](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [RowBinaryWithDefaults](#rowbinarywithdefaults) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | ✔ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |
| [MySQLDump](#mysqldump) | ✔ | ✗ |
| [Markdown](#markdown) | ✗ | ✔ |
You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
@ -195,6 +196,7 @@ SELECT * FROM nestedt FORMAT TSV
- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
- [input_format_tsv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`.
- [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_tsv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_allow_variable_number_of_columns) - allow variable number of columns in TSV format, ignore extra columns and use default values on missing columns. Default value - `false`.
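
For example, a sketch (the file name and schema are hypothetical) that reads a ragged TSV file with the last setting enabled:

```sql
-- Rows with extra columns are truncated, rows with missing columns are padded with default values.
SELECT *
FROM file('ragged.tsv', 'TSV', 'id UInt32, name String, score Float64')
SETTINGS input_format_tsv_allow_variable_number_of_columns = 1;
```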
## TabSeparatedRaw {#tabseparatedraw}
@ -472,7 +474,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe
- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - Allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - allow variable number of columns in CSV format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - Allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`.

## CSVWithNames {#csvwithnames}
@ -501,9 +503,10 @@ the types from input data will be compared with the types of the corresponding c
Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) settings, not from format strings.

Additional settings:
- [input_format_custom_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) - enables automatic detection of header with names and types if any. Default value - `true`.
- [input_format_custom_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_custom_skip_trailing_empty_lines) - skip trailing empty lines at the end of file. Default value - `false`.
- [input_format_custom_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_custom_allow_variable_number_of_columns) - allow variable number of columns in CustomSeparated format, ignore extra columns and use default values on missing columns. Default value - `false`.

There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).
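
A minimal sketch of the output side, using two of the delimiter settings named above (the values are chosen only for illustration):

```sql
-- Emits CSV-escaped fields separated by ';' instead of the default delimiters.
SELECT number AS id, concat('value_', toString(number)) AS label
FROM numbers(3)
SETTINGS format_custom_escaping_rule = 'CSV', format_custom_field_delimiter = ';'
FORMAT CustomSeparated
```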
@ -1261,6 +1264,7 @@ SELECT * FROM json_each_row_nested
- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in JSON object while parsing named tuple. Default value - `true`.
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - Ignore unknown keys in json object for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
@ -2131,9 +2135,11 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
- [output_format_parquet_row_group_size](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_row_group_size) - row group size in rows while data output. Default value - `1000000`.
- [output_format_parquet_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_string_as_string) - use Parquet String type instead of Binary for String columns. Default value - `false`.
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
- [input_format_parquet_local_file_min_bytes_for_seek](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_local_file_min_bytes_for_seek) - minimum number of bytes to skip before a local file read performs a seek instead of reading and ignoring the data, in the Parquet input format. Default value - `8192`.
- [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
- [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - The version of Parquet format used in output format. Default value - `2.latest`.
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.
@ -2407,6 +2413,34 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.
To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/table-engines/integrations/hdfs.md).
## One {#data-format-one}
Special input format that doesn't read any data from the file and returns only one row with a column of type `UInt8`, name `dummy`, and value `0` (like the `system.one` table).
Can be used with the virtual columns `_file/_path` to list all files without reading actual data.
Example:
Query:
```sql
SELECT _file FROM file('path/to/files/data*', One);
```
Result:
```text
┌─_file────┐
│ data.csv │
└──────────┘
┌─_file──────┐
│ data.jsonl │
└────────────┘
┌─_file────┐
│ data.tsv │
└──────────┘
┌─_file────────┐
│ data.parquet │
└──────────────┘
```
## LineAsString {#lineasstring}

In this format, every line of input data is interpreted as a single string value. This format can only be parsed for a table with a single field of type [String](/docs/en/sql-reference/data-types/string.md). The remaining columns must be set to [DEFAULT](/docs/en/sql-reference/statements/create/table.md/#default) or [MATERIALIZED](/docs/en/sql-reference/statements/create/table.md/#materialized), or omitted.


@ -21,6 +21,11 @@ In most cases it is recommended to use an appropriate tool or library instead of
- [ODBC driver](../interfaces/odbc.md)
- [C++ client library](../interfaces/cpp.md)
ClickHouse server provides embedded visual interfaces for power users:
- Play UI: open `/play` in the browser;
- Advanced Dashboard: open `/dashboard` in the browser;
There is also a wide range of third-party libraries for working with ClickHouse:

- [Client libraries](../interfaces/third-party/client-libraries.md)


@ -83,8 +83,8 @@ ClickHouse, Inc. does **not** maintain the tools and libraries listed below and
- Python
    - [SQLAlchemy](https://www.sqlalchemy.org)
        - [sqlalchemy-clickhouse](https://github.com/cloudflare/sqlalchemy-clickhouse) (uses [infi.clickhouse_orm](https://github.com/Infinidat/infi.clickhouse_orm))
    - [PyArrow/Pandas](https://pandas.pydata.org)
        - [Ibis](https://github.com/ibis-project/ibis)
- PHP
    - [Doctrine](https://www.doctrine-project.org/)
        - [dbal-clickhouse](https://packagist.org/packages/friendsofdoctrine/dbal-clickhouse)


@ -169,7 +169,6 @@ host = '127.0.0.1',
port = 3306,
database = 'test',
connection_pool_size = 8,
replace_query = 1
```
@ -185,7 +184,6 @@ replace_query = 1
            <port>3306</port>
            <database>test</database>
            <connection_pool_size>8</connection_pool_size>
            <replace_query>1</replace_query>
        </mymysql>
    </named_collections>


@ -1640,7 +1640,7 @@ Keys for server/client settings:
- verificationMode (default: relaxed) The method for checking the node's certificates. Details are in the description of the [Context](https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/Context.h) class. Possible values: `none`, `relaxed`, `strict`, `once`.
- verificationDepth (default: 9) The maximum length of the verification chain. Verification will fail if the certificate chain length exceeds the set value.
- loadDefaultCAFile (default: true) Whether built-in CA certificates for OpenSSL will be used. ClickHouse assumes that built-in CA certificates are in the file `/etc/ssl/cert.pem` (resp. the directory `/etc/ssl/certs`) or in file (resp. directory) specified by the environment variable `SSL_CERT_FILE` (resp. `SSL_CERT_DIR`).
- cipherList (default: `ALL:!ADH:!LOW:!EXP:!MD5:!3DES:@STRENGTH`) - Supported OpenSSL encryptions.
- cacheSessions (default: false) Enables or disables caching sessions. Must be used in combination with `sessionIdContext`. Acceptable values: `true`, `false`.
- sessionIdContext (default: `${application.name}`) A unique set of random characters that the server appends to each generated identifier. The length of the string must not exceed `SSL_MAX_SSL_SESSION_ID_LENGTH`. This parameter is always recommended since it helps avoid problems both if the server caches the session and if the client requested caching. Default value: `${application.name}`.
- sessionCacheSize (default: [1024\*20](https://github.com/ClickHouse/boringssl/blob/master/include/openssl/ssl.h#L1978)) The maximum number of sessions that the server caches. A value of 0 means unlimited sessions.


@ -7,6 +7,10 @@ pagination_next: en/operations/settings/settings
# Settings Overview
:::note
XML-based Settings Profiles and [configuration files](https://clickhouse.com/docs/en/operations/configuration-files) are currently not supported for ClickHouse Cloud. To specify settings for your ClickHouse Cloud service, you must use [SQL-driven Settings Profiles](https://clickhouse.com/docs/en/operations/access-rights#settings-profiles-management).
:::
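
For example, a hypothetical SQL-driven profile (the profile name, setting values and target role are illustrative, and the role is assumed to already exist):

```sql
CREATE SETTINGS PROFILE IF NOT EXISTS analyst_profile
    SETTINGS max_threads = 8, max_memory_usage = 10000000000
    TO analyst_role;
```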
There are two main groups of ClickHouse settings:

- Global server settings


@ -56,11 +56,11 @@ Possible values:
- Any positive integer.

Default value: 3000.

To achieve maximum performance of `SELECT` queries, it is necessary to minimize the number of parts processed, see [Merge Tree](../../development/architecture.md#merge-tree).

Prior to 23.6 this setting was set to 300. You can set a different, higher value; it will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also, in case of a merge issue (for example, due to insufficient disk space), you will notice it later than you would with the original 300.

## parts_to_delay_insert {#parts-to-delay-insert}


@ -627,6 +627,13 @@ Column type should be String. If value is empty, default names `row_{i}`will be
Default value: ''.
### input_format_json_compact_allow_variable_number_of_columns {#input_format_json_compact_allow_variable_number_of_columns}
Allow variable number of columns in rows in JSONCompact/JSONCompactEachRow input formats.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
Disabled by default.
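
A hedged sketch of the effect (the table name and data are hypothetical; run the `INSERT` from clickhouse-client, where the data follows the query):

```sql
CREATE TABLE events (id UInt32, action String) ENGINE = Memory;

SET input_format_json_compact_allow_variable_number_of_columns = 1;

-- The extra element in the second row is ignored; the missing column in the third row gets its default value.
INSERT INTO events FORMAT JSONCompactEachRow [1, "login"] [2, "click", "ignored"] [3]
```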
## TSV format settings {#tsv-format-settings}

### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
@ -764,6 +771,13 @@ When enabled, trailing empty lines at the end of TSV file will be skipped.
Disabled by default.
### input_format_tsv_allow_variable_number_of_columns {#input_format_tsv_allow_variable_number_of_columns}
Allow variable number of columns in rows in TSV input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
Disabled by default.
## CSV format settings {#csv-format-settings}

### format_csv_delimiter {#format_csv_delimiter}
@ -955,9 +969,11 @@ Result
```text
" string "
```

### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}
Allow variable number of columns in rows in CSV input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.

Disabled by default.
@ -1223,6 +1239,12 @@ Allow skipping columns with unsupported types while schema inference for format
Disabled by default.
### input_format_parquet_local_file_min_bytes_for_seek {#input_format_parquet_local_file_min_bytes_for_seek}
The minimum number of bytes to skip before a local file read performs a seek instead of reading and ignoring the data, in the Parquet input format.

Default value - `8192`.
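
For instance, a sketch (the file name and threshold are hypothetical) that raises the threshold so that small skips are read and discarded rather than seeked:

```sql
SELECT count()
FROM file('events.parquet', Parquet)
SETTINGS input_format_parquet_local_file_min_bytes_for_seek = 65536;
```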
### output_format_parquet_string_as_string {#output_format_parquet_string_as_string}

Use Parquet String type instead of Binary for String columns.
@ -1565,6 +1587,13 @@ When enabled, trailing empty lines at the end of file in CustomSeparated format
Disabled by default.
### input_format_custom_allow_variable_number_of_columns {#input_format_custom_allow_variable_number_of_columns}
Allow variable number of columns in rows in CustomSeparated input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.
Disabled by default.
## Regexp format settings {#regexp-format-settings}

### format_regexp_escaping_rule {#format_regexp_escaping_rule}


@ -98,6 +98,18 @@ Default value: 0.
</profiles>
```
## mutations_execute_nondeterministic_on_initiator {#mutations_execute_nondeterministic_on_initiator}

If true, constant nondeterministic functions (e.g. the function `now()`) are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. This helps keep data in sync on replicas while executing mutations with constant nondeterministic functions. Default value: `false`.

## mutations_execute_subqueries_on_initiator {#mutations_execute_subqueries_on_initiator}

If true, scalar subqueries are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. Default value: `false`.

## mutations_max_literal_size_to_replace {#mutations_max_literal_size_to_replace}

The maximum size of a serialized literal in bytes to replace in `UPDATE` and `DELETE` queries. Takes effect only if at least one of the two settings above is enabled. Default value: 16384 (16 KiB).
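
A hedged sketch of how the first setting is applied to a single mutation (the table and columns are hypothetical):

```sql
-- now() is evaluated once on the initiator and shipped to replicas as a literal.
ALTER TABLE hits
    UPDATE last_seen = now() WHERE user_id = 42
    SETTINGS mutations_execute_nondeterministic_on_initiator = 1;
```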
## distributed_product_mode {#distributed-product-mode}

Changes the behaviour of [distributed subqueries](../../sql-reference/operators/in.md).
@ -4309,7 +4321,7 @@ Use this setting only for backward compatibility if your use cases depend on old
## session_timezone {#session_timezone}

Sets the implicit time zone of the current session or query.
The implicit time zone is the time zone applied to values of type DateTime/DateTime64 which have no explicitly specified time zone.
The setting takes precedence over the globally configured (server-level) implicit time zone.
A value of '' (empty string) means that the implicit time zone of the current session or query is equal to the [server time zone](../server-configuration-parameters/settings.md#server_configuration_parameters-timezone).
@ -4344,7 +4356,7 @@ SELECT toDateTime64(toDateTime64('1999-12-12 23:23:23.123', 3), 3, 'Europe/Zuric
```

:::warning
Not all functions that parse DateTime/DateTime64 respect `session_timezone`. This can lead to subtle errors.
See the following example and explanation.
:::


@ -0,0 +1,58 @@
---
slug: /en/operations/system-tables/kafka_consumers
---
# kafka_consumers
Contains information about Kafka consumers.
Applicable for the [Kafka table engine](../../engines/table-engines/integrations/kafka) (native ClickHouse integration).
Columns:
- `database` (String) - database of the table with Kafka Engine.
- `table` (String) - name of the table with Kafka Engine.
- `consumer_id` (String) - Kafka consumer identifier. Note that a table can have many consumers. Specified by the `kafka_num_consumers` parameter.
- `assignments.topic` (Array(String)) - Kafka topic.
- `assignments.partition_id` (Array(Int32)) - Kafka partition id. Note that only one consumer can be assigned to a partition.
- `assignments.current_offset` (Array(Int64)) - current offset.
- `exceptions.time` (Array(DateTime)) - timestamps when the 10 most recent exceptions were generated.
- `exceptions.text` (Array(String)) - text of the 10 most recent exceptions.
- `last_poll_time` (DateTime) - timestamp of the most recent poll.
- `num_messages_read` (UInt64) - number of messages read by the consumer.
- `last_commit_time` (DateTime) - timestamp of the most recent commit.
- `num_commits` (UInt64) - total number of commits for the consumer.
- `last_rebalance_time` (DateTime) - timestamp of the most recent Kafka rebalance.
- `num_rebalance_revocations` (UInt64) - number of times the consumer's partitions were revoked.
- `num_rebalance_assignments` (UInt64) - number of times the consumer was assigned to the Kafka cluster.
- `is_currently_used` (UInt8) - whether the consumer is in use.
- `rdkafka_stat` (String) - library internal statistics. See https://github.com/ClickHouse/librdkafka/blob/master/STATISTICS.md. Set `statistics_interval_ms` to 0 to disable, default is 3000 (once in three seconds).
Example:
``` sql
SELECT *
FROM system.kafka_consumers
FORMAT Vertical
```
``` text
Row 1:
──────
database: test
table: kafka
consumer_id: ClickHouse-instance-test-kafka-1caddc7f-f917-4bb1-ac55-e28bd103a4a0
assignments.topic: ['system_kafka_cons']
assignments.partition_id: [0]
assignments.current_offset: [18446744073709550615]
exceptions.time: []
exceptions.text: []
last_poll_time: 2006-11-09 18:47:47
num_messages_read: 4
last_commit_time: 2006-11-10 04:39:40
num_commits: 1
last_rebalance_time: 1970-01-01 00:00:00
num_rebalance_revocations: 0
num_rebalance_assignments: 1
is_currently_used: 1
rdkafka_stat: {...}
```


@ -45,9 +45,14 @@ keeper foo bar
- `ls [path]` -- Lists the nodes for the given path (default: cwd)
- `cd [path]` -- Change the working path (default `.`)
- `set <path> <value> [version]` -- Updates the node's value. Only update if version matches (default: -1)
- `create <path> <value> [mode]` -- Creates new node with the set value
- `touch <path>` -- Creates new node with an empty string as value. Doesn't throw an exception if the node already exists
- `get <path>` -- Returns the node's value
- `remove <path>` -- Remove the node
- `rmr <path>` -- Recursively deletes path. Confirmation required
- `flwc <command>` -- Executes four-letter-word command
- `help` -- Prints this message
- `get_stat [path]` -- Returns the node's stat (default `.`)
- `find_super_nodes <threshold> [path]` -- Finds nodes with number of children larger than some threshold for the given path (default `.`)
- `delete_stale_backups` -- Deletes ClickHouse nodes used for backups that are now inactive
- `find_big_family [path] [n]` -- Returns the top n nodes with the biggest family in the subtree (default path = `.` and n = 10)


@ -26,9 +26,9 @@ SELECT p, toTypeName(p) FROM geo_point;
Result:

``` text
┌─p───────┬─toTypeName(p)─┐
│ (10,10) │ Point         │
└─────────┴───────────────┘
```

## Ring


@ -4,7 +4,7 @@ sidebar_position: 54
sidebar_label: Tuple(T1, T2, ...)
---

# Tuple(T1, T2, …)

A tuple of elements, each having an individual [type](../../sql-reference/data-types/index.md#data_types). Tuple must contain at least one element.
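
For example, a quick way to inspect a tuple value and its type (the expected output is shown as a comment):

```sql
SELECT tuple(1, 'a') AS t, toTypeName(t);

-- Returns: (1,'a')   Tuple(UInt8, String)
```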


@ -6,42 +6,42 @@ sidebar_label: UUID
# UUID

A Universally Unique Identifier (UUID) is a 16-byte value used to identify records. For detailed information about UUIDs, see [Wikipedia](https://en.wikipedia.org/wiki/Universally_unique_identifier).

While different UUID variants exist (see [here](https://datatracker.ietf.org/doc/html/draft-ietf-uuidrev-rfc4122bis)), ClickHouse does not validate that inserted UUIDs conform to a particular variant. UUIDs are internally treated as a sequence of 16 random bytes with [8-4-4-4-12 representation](https://en.wikipedia.org/wiki/Universally_unique_identifier#Textual_representation) at SQL level.

Example UUID value:

``` text
61f0c404-5cb3-11e7-907b-a6006ad3dba0
```

The default UUID is all-zero. It is used, for example, when a new record is inserted but no value for a UUID column is specified:

``` text
00000000-0000-0000-0000-000000000000
```

## Generating UUIDs

ClickHouse provides the [generateUUIDv4](../../sql-reference/functions/uuid-functions.md) function to generate random UUID version 4 values.

## Usage Example

**Example 1**

This example demonstrates the creation of a table with a UUID column and the insertion of a value into the table.
``` sql ``` sql
CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog CREATE TABLE t_uuid (x UUID, y String) ENGINE=TinyLog
```
``` sql
INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1' INSERT INTO t_uuid SELECT generateUUIDv4(), 'Example 1'
```
``` sql
SELECT * FROM t_uuid SELECT * FROM t_uuid
``` ```
Result:
``` text ``` text
┌────────────────────────────────────x─┬─y─────────┐ ┌────────────────────────────────────x─┬─y─────────┐
│ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │ │ 417ddc5d-e556-4d27-95dd-a34d84e46a50 │ Example 1 │
@ -50,13 +50,11 @@ SELECT * FROM t_uuid
**Example 2** **Example 2**
In this example, the UUID column value is not specified when inserting a new record. In this example, no UUID column value is specified when the record is inserted, i.e. the default UUID value is inserted:
``` sql ``` sql
INSERT INTO t_uuid (y) VALUES ('Example 2') INSERT INTO t_uuid (y) VALUES ('Example 2')
```
``` sql
SELECT * FROM t_uuid SELECT * FROM t_uuid
``` ```

View File

@ -1092,7 +1092,7 @@ Types of sources (`source_type`):
- [Local file](#local_file) - [Local file](#local_file)
- [Executable File](#executable) - [Executable File](#executable)
- [Executable Pool](#executable_pool) - [Executable Pool](#executable_pool)
- [HTTP(s)](#http) - [HTTP(S)](#http)
- DBMS - DBMS
- [ODBC](#odbc) - [ODBC](#odbc)
- [MySQL](#mysql) - [MySQL](#mysql)
@ -1102,7 +1102,7 @@ Types of sources (`source_type`):
- [Cassandra](#cassandra) - [Cassandra](#cassandra)
- [PostgreSQL](#postgresql) - [PostgreSQL](#postgresql)
## Local File {#local_file} ### Local File {#local_file}
Example of settings: Example of settings:
@ -1132,7 +1132,7 @@ When a dictionary with source `FILE` is created via DDL command (`CREATE DICTION
- [Dictionary function](../../sql-reference/table-functions/dictionary.md#dictionary-function) - [Dictionary function](../../sql-reference/table-functions/dictionary.md#dictionary-function)
## Executable File {#executable} ### Executable File {#executable}
Working with executable files depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file's STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data. Working with executable files depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file's STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data.
@ -1161,7 +1161,7 @@ Setting fields:
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node. That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute arbitrary binaries on the ClickHouse node.
## Executable Pool {#executable_pool} ### Executable Pool {#executable_pool}
Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts. Executable pool allows loading data from pool of processes. This source does not work with dictionary layouts that need to load all data from source. Executable pool works if the dictionary [is stored](#ways-to-store-dictionaries-in-memory) using `cache`, `complex_key_cache`, `ssd_cache`, `complex_key_ssd_cache`, `direct`, or `complex_key_direct` layouts.
@ -1196,9 +1196,9 @@ Setting fields:
That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute an arbitrary binary on the ClickHouse node. That dictionary source can be configured only via XML configuration. Creating dictionaries with executable source via DDL is disabled; otherwise, the DB user would be able to execute an arbitrary binary on the ClickHouse node.
## Http(s) {#https} ### HTTP(S) {#https}
Working with an HTTP(s) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method. Working with an HTTP(S) server depends on [how the dictionary is stored in memory](#storig-dictionaries-in-memory). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method.
Example of settings: Example of settings:
@ -1248,7 +1248,55 @@ Setting fields:
When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server. When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`) remote hosts for HTTP dictionaries are checked against the contents of `remote_url_allow_hosts` section from config to prevent database users to access arbitrary HTTP server.
### Known Vulnerability of the ODBC Dictionary Functionality ### DBMS
#### ODBC
You can use this method to connect to any database that has an ODBC driver.
Example of settings:
``` xml
<source>
<odbc>
<db>DatabaseName</db>
<table>SchemaName.TableName</table>
<connection_string>DSN=some_parameters</connection_string>
<invalidate_query>SQL_QUERY</invalidate_query>
<query>SELECT id, value_1, value_2 FROM SchemaName.TableName</query>
</odbc>
</source>
```
or
``` sql
SOURCE(ODBC(
db 'DatabaseName'
table 'SchemaName.TableName'
connection_string 'DSN=some_parameters'
invalidate_query 'SQL_QUERY'
query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
Setting fields:
- `db` – Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` – Name of the table and schema if it exists.
- `connection_string` – Connection string.
- `invalidate_query` – Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates).
- `query` – The custom query. Optional parameter.
:::note
The `table` and `query` fields cannot be used together; one of them must be declared.
:::
ClickHouse receives quoting symbols from the ODBC driver and quotes all settings in queries to the driver, so it's necessary to set the table name according to the table name case in the database.
If you have problems with encodings when using Oracle, see the corresponding [FAQ](/knowledgebase/oracle-odbc) item.
##### Known Vulnerability of the ODBC Dictionary Functionality
:::note :::note
When connecting to the database through the ODBC driver, the connection parameter `Servername` can be substituted. In this case, the values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised. When connecting to the database through the ODBC driver, the connection parameter `Servername` can be substituted. In this case, the values of `USERNAME` and `PASSWORD` from `odbc.ini` are sent to the remote server and can be compromised.
@ -1277,7 +1325,7 @@ SELECT * FROM odbc('DSN=gregtest;Servername=some-server.com', 'test_db');
ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`. ODBC driver will send values of `USERNAME` and `PASSWORD` from `odbc.ini` to `some-server.com`.
### Example of Connecting Postgresql ##### Example of Connecting Postgresql
Ubuntu OS. Ubuntu OS.
@ -1358,7 +1406,7 @@ LIFETIME(MIN 300 MAX 360)
You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`. You may need to edit `odbc.ini` to specify the full path to the library with the driver `DRIVER=/usr/local/lib/psqlodbcw.so`.
### Example of Connecting MS SQL Server ##### Example of Connecting MS SQL Server
Ubuntu OS. Ubuntu OS.
@ -1462,55 +1510,7 @@ LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360) LIFETIME(MIN 300 MAX 360)
``` ```
## DBMS #### Mysql
### ODBC
You can use this method to connect any database that has an ODBC driver.
Example of settings:
``` xml
<source>
<odbc>
<db>DatabaseName</db>
<table>ShemaName.TableName</table>
<connection_string>DSN=some_parameters</connection_string>
<invalidate_query>SQL_QUERY</invalidate_query>
<query>SELECT id, value_1, value_2 FROM ShemaName.TableName</query>
</odbc>
</source>
```
or
``` sql
SOURCE(ODBC(
db 'DatabaseName'
table 'SchemaName.TableName'
connection_string 'DSN=some_parameters'
invalidate_query 'SQL_QUERY'
query 'SELECT id, value_1, value_2 FROM db_name.table_name'
))
```
Setting fields:
- `db` Name of the database. Omit it if the database name is set in the `<connection_string>` parameters.
- `table` Name of the table and schema if exists.
- `connection_string` Connection string.
- `invalidate_query` Query for checking the dictionary status. Optional parameter. Read more in the section [Updating dictionaries](#dictionary-updates).
- `query` The custom query. Optional parameter.
:::note
The `table` and `query` fields cannot be used together. And either one of the `table` or `query` fields must be declared.
:::
ClickHouse receives quoting symbols from ODBC-driver and quote all settings in queries to driver, so its necessary to set table name accordingly to table name case in database.
If you have a problems with encodings when using Oracle, see the corresponding [FAQ](/knowledgebase/oracle-odbc) item.
### Mysql
Example of settings: Example of settings:
@ -1627,7 +1627,7 @@ SOURCE(MYSQL(
)) ))
``` ```
### ClickHouse #### ClickHouse
Example of settings: Example of settings:
@ -1680,7 +1680,7 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
::: :::
### Mongodb #### Mongodb
Example of settings: Example of settings:
@ -1723,7 +1723,7 @@ Setting fields:
- `options` - MongoDB connection string options (optional parameter). - `options` - MongoDB connection string options (optional parameter).
### Redis #### Redis
Example of settings: Example of settings:
@ -1756,7 +1756,7 @@ Setting fields:
- `storage_type` The structure of internal Redis storage used for working with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`. - `storage_type` The structure of internal Redis storage used for working with keys. `simple` is for simple sources and for hashed single key sources, `hash_map` is for hashed sources with two keys. Ranged sources and cache sources with complex key are unsupported. May be omitted, default value is `simple`.
- `db_index` The specific numeric index of Redis logical database. May be omitted, default value is 0. - `db_index` The specific numeric index of Redis logical database. May be omitted, default value is 0.
### Cassandra #### Cassandra
Example of settings: Example of settings:
@ -1798,7 +1798,7 @@ Setting fields:
The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared. The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared.
::: :::
### PostgreSQL #### PostgreSQL
Example of settings: Example of settings:
@ -1855,7 +1855,7 @@ Setting fields:
The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared. The `table` or `where` fields cannot be used together with the `query` field. And either one of the `table` or `query` fields must be declared.
::: :::
## Null ### Null
A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can be useful for tests or in setups with separated data and query nodes, on nodes with Distributed tables. A special source that can be used to create dummy (empty) dictionaries. Such dictionaries can be useful for tests or in setups with separated data and query nodes, on nodes with Distributed tables.
@ -2476,52 +2476,3 @@ Dictionary updates (other than loading at first use) do not block queries. Durin
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server. We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.
There are also functions for working with OS identifiers and search engines, but they shouldnt be used. There are also functions for working with OS identifiers and search engines, but they shouldnt be used.
## Embedded Dictionaries
<SelfManaged />
ClickHouse contains a built-in feature for working with a geobase.
This allows you to:
- Use a regions ID to get its name in the desired language.
- Use a regions ID to get the ID of a city, area, federal district, country, or continent.
- Check whether a region is part of another region.
- Get a chain of parent regions.
All the functions support “translocality,” the ability to simultaneously use different perspectives on region ownership. For more information, see the section “Functions for working with web analytics dictionaries”.
The internal dictionaries are disabled in the default package.
To enable them, uncomment the parameters `path_to_regions_hierarchy_file` and `path_to_regions_names_files` in the server configuration file.
The geobase is loaded from text files.
Place the `regions_hierarchy*.txt` files into the `path_to_regions_hierarchy_file` directory. This configuration parameter must contain the path to the `regions_hierarchy.txt` file (the default regional hierarchy), and the other files (`regions_hierarchy_ua.txt`) must be located in the same directory.
Put the `regions_names_*.txt` files in the `path_to_regions_names_files` directory.
You can also create these files yourself. The file format is as follows:
`regions_hierarchy*.txt`: TabSeparated (no header), columns:
- region ID (`UInt32`)
- parent region ID (`UInt32`)
- region type (`UInt8`): 1 - continent, 3 - country, 4 - federal district, 5 - region, 6 - city; other types do not have values
- population (`UInt32`) — optional column
`regions_names_*.txt`: TabSeparated (no header), columns:
- region ID (`UInt32`)
- region name (`String`) — Cant contain tabs or line feeds, even escaped ones.
A flat array is used for storing in RAM. For this reason, IDs shouldnt be more than a million.
Dictionaries can be updated without restarting the server. However, the set of available dictionaries is not updated.
For updates, the file modification times are checked. If a file has changed, the dictionary is updated.
The interval to check for changes is configured in the `builtin_dictionaries_reload_interval` parameter.
Dictionary updates (other than loading at first use) do not block queries. During updates, queries use the old versions of dictionaries. If an error occurs during an update, the error is written to the server log, and queries continue using the old version of dictionaries.
We recommend periodically updating the dictionaries with the geobase. During an update, generate new files and write them to a separate location. When everything is ready, rename them to the files used by the server.
There are also functions for working with OS identifiers and search engines, but they shouldnt be used.

View File

@ -183,9 +183,8 @@ arrayConcat(arrays)
**Arguments** **Arguments**
- `arrays` Arbitrary number of arguments of [Array](../../sql-reference/data-types/array.md) type. - `arrays` Arbitrary number of arguments of [Array](../../sql-reference/data-types/array.md) type.
**Example**
<!-- --> **Example**
``` sql ``` sql
SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res

View File

@ -1819,6 +1819,72 @@ Result:
└────────────────────────────────────┘ └────────────────────────────────────┘
``` ```
## toUTCTimestamp
Converts a DateTime/DateTime64 value from another time zone to a UTC timestamp.
**Syntax**
``` sql
toUTCTimestamp(time_val, time_zone)
```
**Arguments**
- `time_val` — A DateTime/DateTime64 constant value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String constant value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)
**Returned value**
- DateTime/DateTime64 in text form
**Example**
``` sql
SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai');
```
Result:
``` text
┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐
│ 2023-03-15 16:00:00 │
└─────────────────────────────────────────────────────────┘
```
## fromUTCTimestamp
Converts a DateTime/DateTime64 value from the UTC time zone to a timestamp in another time zone.
**Syntax**
``` sql
fromUTCTimestamp(time_val, time_zone)
```
**Arguments**
- `time_val` — A DateTime/DateTime64 constant value or an expression. [DateTime/DateTime64 types](../../sql-reference/data-types/datetime.md)
- `time_zone` — A String constant value or an expression representing the time zone. [String types](../../sql-reference/data-types/string.md)
**Returned value**
- DateTime/DateTime64 in text form
**Example**
``` sql
SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai');
```
Result:
``` text
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐
│ 2023-03-16 18:00:00.000 │
└─────────────────────────────────────────────────────────────────────────┘
```
## Related content ## Related content
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse) - Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)

View File

@ -18,7 +18,7 @@ file(path[, default])
**Arguments** **Arguments**
- `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings. - `path` — The path of the file relative to [user_files_path](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-user_files_path). Supports wildcards `*`, `**`, `?`, `{abc,def}` and `{N..M}` where `N`, `M` are numbers and `'abc', 'def'` are strings.
- `default` — The value returned if the file does not exist or cannot be accessed. Supported data types: [String](../../sql-reference/data-types/string.md) and [NULL](../../sql-reference/syntax.md#null-literal). - `default` — The value returned if the file does not exist or cannot be accessed. Supported data types: [String](../../sql-reference/data-types/string.md) and [NULL](../../sql-reference/syntax.md#null-literal).
**Example** **Example**
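A hedged sketch of reading a file with a fallback default (the file name and fallback value are illustrative, not taken from the diff):

``` sql
-- returns the file contents, or the literal 'missing' if the file cannot be accessed
SELECT file('data/greeting.txt', 'missing') AS contents;
```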

View File

@ -51,7 +51,7 @@ Calculates the MD5 from a string and returns the resulting set of bytes as Fixed
If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the sipHash128 function instead. If you do not need MD5 in particular, but you need a decent cryptographic 128-bit hash, use the sipHash128 function instead.
If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))). If you want to get the same result as output by the md5sum utility, use lower(hex(MD5(s))).
## sipHash64 (#hash_functions-siphash64) ## sipHash64 {#hash_functions-siphash64}
Produces a 64-bit [SipHash](https://en.wikipedia.org/wiki/SipHash) hash value. Produces a 64-bit [SipHash](https://en.wikipedia.org/wiki/SipHash) hash value.
@ -63,9 +63,9 @@ This is a cryptographic hash function. It works at least three times faster than
The function [interprets](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. It then combines the hashes by the following algorithm: The function [interprets](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_functions-reinterpretAsString) all the input parameters as strings and calculates the hash value for each of them. It then combines the hashes by the following algorithm:
1. The first and the second hash value are concatenated to an array which is hashed. 1. The first and the second hash value are concatenated to an array which is hashed.
2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way. 2. The previously calculated hash value and the hash of the third input parameter are hashed in a similar way.
3. This calculation is repeated for all remaining hash values of the original input. 3. This calculation is repeated for all remaining hash values of the original input.
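For instance, a multi-argument call such as the following combines the per-argument hashes pairwise as described above (a minimal sketch; the argument values are illustrative and the resulting hash is omitted):

``` sql
-- the hashes of the string, the number and the date are folded into one 64-bit value
SELECT sipHash64('ClickHouse', 2023, toDate('2023-08-21')) AS combined_hash;
```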
**Arguments** **Arguments**

View File

@ -729,6 +729,30 @@ Returns whether string `str` ends with `suffix`.
endsWith(str, suffix) endsWith(str, suffix)
``` ```
## endsWithUTF8
Returns whether string `str` ends with `suffix`. The difference between `endsWithUTF8` and `endsWith` is that `endsWithUTF8` matches `str` and `suffix` by UTF-8 characters.
**Syntax**
```sql
endsWithUTF8(str, suffix)
```
**Example**
``` sql
SELECT endsWithUTF8('中国', '\xbd'), endsWith('中国', '\xbd')
```
Result:
```result
┌─endsWithUTF8('中国', '½')─┬─endsWith('中国', '½')─┐
│ 0 │ 1 │
└──────────────────────────┴──────────────────────┘
```
## startsWith ## startsWith
Returns whether string `str` starts with `prefix`. Returns whether string `str` starts with `prefix`.
@ -745,6 +769,25 @@ startsWith(str, prefix)
SELECT startsWith('Spider-Man', 'Spi'); SELECT startsWith('Spider-Man', 'Spi');
``` ```
## startsWithUTF8
Returns whether string `str` starts with `prefix`. The difference between `startsWithUTF8` and `startsWith` is that `startsWithUTF8` matches `str` and `prefix` by UTF-8 characters.
**Example**
``` sql
SELECT startsWithUTF8('中国', '\xe4'), startsWith('中国', '\xe4')
```
Result:
```result
┌─startsWithUTF8('中国', '⥩─┬─startsWith('中国', '⥩─┐
│ 0 │ 1 │
└────────────────────────────┴────────────────────────┘
```
## trim ## trim
Removes the specified characters from the start or end of a string. If not specified otherwise, the function removes whitespace (ASCII-character 32). Removes the specified characters from the start or end of a string. If not specified otherwise, the function removes whitespace (ASCII-character 32).

View File

@ -559,6 +559,29 @@ Result:
└────────────────────────────┘ └────────────────────────────┘
``` ```
## tupleConcat
Combines tuples passed as arguments.
``` sql
tupleConcat(tuples)
```
**Arguments**
- `tuples` Arbitrary number of arguments of [Tuple](../../sql-reference/data-types/tuple.md) type.
**Example**
``` sql
SELECT tupleConcat((1, 2), (3, 4), (true, false)) AS res
```
``` text
┌─res──────────────────┐
│ (1,2,3,4,true,false) │
└──────────────────────┘
```
## Distance functions ## Distance functions

View File

@ -11,6 +11,7 @@ Syntax:
``` sql ``` sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name] CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
[IN access_storage_type]
[KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED] [KEYED BY {user_name | ip_address | client_key | client_key,user_name | client_key,ip_address} | NOT KEYED]
[FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year} [FOR [RANDOMIZED] INTERVAL number {second | minute | hour | day | week | month | quarter | year}
{MAX { {queries | query_selects | query_inserts | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] | {MAX { {queries | query_selects | query_inserts | errors | result_rows | result_bytes | read_rows | read_bytes | execution_time} = number } [,...] |

View File

@ -11,6 +11,7 @@ Syntax:
``` sql ``` sql
CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, name2 [ON CLUSTER cluster_name2] ...] CREATE ROLE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] [, name2 [ON CLUSTER cluster_name2] ...]
[IN access_storage_type]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | PROFILE 'profile_name'] [,...]
``` ```
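A hedged sketch of the new `IN access_storage_type` clause (the role name is hypothetical; the available storage names are listed in the MOVE statement section below):

``` sql
-- create the role in the local_directory access storage instead of the default one
CREATE ROLE accountant IN local_directory;
```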

View File

@ -16,6 +16,7 @@ Syntax:
``` sql ``` sql
CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1|db1.* CREATE [ROW] POLICY [IF NOT EXISTS | OR REPLACE] policy_name1 [ON CLUSTER cluster_name1] ON [db1.]table1|db1.*
[, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2|db2.* ...] [, policy_name2 [ON CLUSTER cluster_name2] ON [db2.]table2|db2.* ...]
[IN access_storage_type]
[FOR SELECT] USING condition [FOR SELECT] USING condition
[AS {PERMISSIVE | RESTRICTIVE}] [AS {PERMISSIVE | RESTRICTIVE}]
[TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}] [TO {role1 [, role2 ...] | ALL | ALL EXCEPT role1 [, role2 ...]}]

View File

@ -12,6 +12,7 @@ Syntax:
``` sql ``` sql
CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1] CREATE SETTINGS PROFILE [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...] [, name2 [ON CLUSTER cluster_name2] ...]
[IN access_storage_type]
[SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | INHERIT 'profile_name'] [,...] [SETTINGS variable [= value] [MIN [=] min_value] [MAX [=] max_value] [CONST|READONLY|WRITABLE|CHANGEABLE_IN_READONLY] | INHERIT 'profile_name'] [,...]
``` ```
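Similarly, a hedged sketch for settings profiles (profile name and setting value are illustrative):

``` sql
CREATE SETTINGS PROFILE max_memory_profile IN local_directory
    SETTINGS max_memory_usage = 10000000000;
```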

View File

@ -14,6 +14,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
[, name2 [ON CLUSTER cluster_name2] ...] [, name2 [ON CLUSTER cluster_name2] ...]
[NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}] [NOT IDENTIFIED | IDENTIFIED {[WITH {no_password | plaintext_password | sha256_password | sha256_hash | double_sha1_password | double_sha1_hash}] BY {'password' | 'hash'}} | {WITH ldap SERVER 'server_name'} | {WITH kerberos [REALM 'realm']} | {WITH ssl_certificate CN 'common_name'}]
[HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE] [HOST {LOCAL | NAME 'name' | REGEXP 'name_regexp' | IP 'address' | LIKE 'pattern'} [,...] | ANY | NONE]
[IN access_storage_type]
[DEFAULT ROLE role [,...]] [DEFAULT ROLE role [,...]]
[DEFAULT DATABASE database | NONE] [DEFAULT DATABASE database | NONE]
[GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]] [GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]]

View File

@ -49,7 +49,7 @@ Deletes a user.
Syntax: Syntax:
``` sql ``` sql
DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
``` ```
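A hedged sketch of the new `FROM access_storage_type` clause (the user name is hypothetical); the same clause applies to the DROP ROLE, ROW POLICY, QUOTA and SETTINGS PROFILE statements below:

``` sql
-- drop only the user definition stored in the local_directory access storage
DROP USER IF EXISTS test_user FROM local_directory;
```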
## DROP ROLE ## DROP ROLE
@ -59,7 +59,7 @@ Deletes a role. The deleted role is revoked from all the entities where it was a
Syntax: Syntax:
``` sql ``` sql
DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
``` ```
## DROP ROW POLICY ## DROP ROW POLICY
@ -69,7 +69,7 @@ Deletes a row policy. Deleted row policy is revoked from all the entities where
Syntax: Syntax:
``` sql ``` sql
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
``` ```
## DROP QUOTA ## DROP QUOTA
@ -79,7 +79,7 @@ Deletes a quota. The deleted quota is revoked from all the entities where it was
Syntax: Syntax:
``` sql ``` sql
DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
``` ```
## DROP SETTINGS PROFILE ## DROP SETTINGS PROFILE
@ -89,7 +89,7 @@ Deletes a settings profile. The deleted settings profile is revoked from all the
Syntax: Syntax:
``` sql ``` sql
DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name] [FROM access_storage_type]
``` ```
## DROP VIEW ## DROP VIEW

View File

@ -11,7 +11,7 @@ Inserts data into a table.
**Syntax** **Syntax**
``` sql ``` sql
INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ... INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
``` ```
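A hedged sketch of the now-optional `TABLE` keyword (table and column names are illustrative):

``` sql
-- equivalent to INSERT INTO t_demo (id, s) VALUES ...
INSERT INTO TABLE t_demo (id, s) VALUES (1, 'a'), (2, 'b');
```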
You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier). You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
@ -107,7 +107,7 @@ If table has [constraints](../../sql-reference/statements/create/table.md#constr
**Syntax** **Syntax**
``` sql ``` sql
INSERT INTO [db.]table [(c1, c2, c3)] SELECT ... INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] SELECT ...
``` ```
Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed. Columns are mapped according to their position in the SELECT clause. However, their names in the SELECT expression and the table for INSERT may differ. If necessary, type casting is performed.
@ -126,7 +126,7 @@ To insert a default value instead of `NULL` into a column with not nullable data
**Syntax** **Syntax**
``` sql ``` sql
INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
``` ```
Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause. Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
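A hedged sketch combining the optional `TABLE` keyword with a client-side file (file name, table and format are illustrative):

``` sql
-- run from clickhouse-client; data.csv is read on the client side
INSERT INTO TABLE t_demo (id, s) FROM INFILE 'data.csv' FORMAT CSV
```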

View File

@ -0,0 +1,32 @@
---
slug: /en/sql-reference/statements/move
sidebar_position: 54
sidebar_label: MOVE
---
# MOVE access entity statement
This statement allows moving an access entity from one access storage to another.
Syntax:
```sql
MOVE {USER, ROLE, QUOTA, SETTINGS PROFILE, ROW POLICY} name1 [, name2, ...] TO access_storage_type
```
Currently, there are five access storages in ClickHouse:
- `local_directory`
- `memory`
- `replicated`
- `users_xml` (ro)
- `ldap` (ro)
Examples:
```sql
MOVE USER test TO local_directory
```
```sql
MOVE ROLE test TO memory
```

Some files were not shown because too many files have changed in this diff