Merge remote-tracking branch 'blessed/master' into local-default-database-name

Commit e1b353d19e (mirror of https://github.com/ClickHouse/ClickHouse.git)
.github/workflows/backport_branches.yml (6 changed lines, vendored)

@@ -157,7 +157,8 @@ jobs:
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
 BuilderReport:
-if: ${{ !failure() && !cancelled() }}
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderDebAarch64

@@ -177,7 +178,8 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 BuilderSpecialReport:
-if: ${{ !failure() && !cancelled() }}
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderBinDarwin
.github/workflows/master.yml (6 changed lines, vendored)

@@ -262,6 +262,8 @@ jobs:
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
 BuilderReport:
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderBinRelease

@@ -272,7 +274,6 @@ jobs:
 - BuilderDebRelease
 - BuilderDebTsan
 - BuilderDebUBsan
-if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
 test_name: ClickHouse build check

@@ -285,7 +286,8 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 BuilderSpecialReport:
-if: ${{ !failure() && !cancelled() }}
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderBinAarch64
.github/workflows/pull_request.yml (6 changed lines, vendored)

@@ -291,6 +291,8 @@ jobs:
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
 BuilderReport:
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderBinRelease

@@ -301,7 +303,6 @@ jobs:
 - BuilderDebRelease
 - BuilderDebTsan
 - BuilderDebUBsan
-if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
 test_name: ClickHouse build check

@@ -314,7 +315,8 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 BuilderSpecialReport:
-if: ${{ !failure() && !cancelled() }}
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderBinAarch64
.github/workflows/release_branches.yml (6 changed lines, vendored)

@@ -172,6 +172,8 @@ jobs:
 ##################################### BUILD REPORTER #######################################
 ############################################################################################
 BuilderReport:
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderDebRelease

@@ -181,7 +183,6 @@ jobs:
 - BuilderDebUBsan
 - BuilderDebMsan
 - BuilderDebDebug
-if: ${{ !failure() && !cancelled() }}
 uses: ./.github/workflows/reusable_test.yml
 with:
 test_name: ClickHouse build check

@@ -194,7 +195,8 @@ jobs:
 run_command: |
 python3 build_report_check.py "$CHECK_NAME"
 BuilderSpecialReport:
-if: ${{ !failure() && !cancelled() }}
+# run report check for failed builds to indicate the CI error
+if: ${{ !cancelled() }}
 needs:
 - RunConfig
 - BuilderDebRelease
.github/workflows/reusable_build.yml (2 changed lines, vendored)

@@ -76,6 +76,8 @@ jobs:
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/build_check.py" "$BUILD_NAME"
 - name: Post
+# it still be build report to upload for failed build job
+if: always()
 run: |
 python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.build_name}}'
 - name: Mark as done
.gitmessage (new file, 10 lines)

@@ -0,0 +1,10 @@
+
+
+## To avoid merge commit in CI run (add a leading space to apply):
+#no-merge-commit
+
+## Running specified job (add a leading space to apply):
+#job_<JOB NAME>
+#job_stateless_tests_release
+#job_package_debug
+#job_integration_tests_asan
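Not part of the diff: for illustration, this template would typically be wired up with git's commit.template setting, and a tag is then "applied" by giving it a leading space when editing the message, presumably so that git's comment stripping does not drop the line before the CI parser sees it. A minimal sketch:

    # Hypothetical usage of the new .gitmessage template (not from the commit).
    git config commit.template .gitmessage
    # In the editor, change "#job_stateless_tests_release" to " #job_stateless_tests_release"
    # (note the leading space) so the line survives comment stripping.
    git commit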
.gitmodules (3 changed lines, vendored)

@@ -360,3 +360,6 @@
 [submodule "contrib/sqids-cpp"]
 path = contrib/sqids-cpp
 url = https://github.com/sqids/sqids-cpp.git
+[submodule "contrib/idna"]
+path = contrib/idna
+url = https://github.com/ada-url/idna.git
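As a side note (not shown in the diff), an existing working copy would normally fetch the newly registered submodule with the standard command:

    # Pull the idna sources referenced by the new .gitmodules entry.
    git submodule update --init contrib/idna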
@@ -69,6 +69,9 @@
 // init() is called in the MyIOS constructor.
 // Therefore we replace each call to init() with
 // the poco_ios_init macro defined below.
+//
+// Also this macro will adjust exceptions() flags, since by default std::ios
+// will hide exceptions, while in ClickHouse it is better to pass them through.
 
 
 #if !defined(POCO_IOS_INIT_HACK)

@@ -79,7 +82,10 @@
 #if defined(POCO_IOS_INIT_HACK)
 # define poco_ios_init(buf)
 #else
-# define poco_ios_init(buf) init(buf)
+# define poco_ios_init(buf) do { \
+init(buf); \
+this->exceptions(std::ios::failbit | std::ios::badbit); \
+} while (0)
 #endif
 
 
@@ -70,6 +70,15 @@ public:
 int queryConvert(const unsigned char * bytes, int length) const;
 int sequenceLength(const unsigned char * bytes, int length) const;
 
+protected:
+static int safeToInt(Poco::UInt32 value)
+{
+if (value <= 0x10FFFF)
+return static_cast<int>(value);
+else
+return -1;
+}
+
 private:
 bool _flipBytes;
 static const char * _names[];
@@ -30,22 +30,22 @@ const char* UTF32Encoding::_names[] =
 
 const TextEncoding::CharacterMap UTF32Encoding::_charMap =
 {
-/* 00 */ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+/* 00 */ -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
 (rows /* 10 */ through /* f0 */ repeat the same sixteen-entry pattern, with every -2 replaced by -4)
 };
 
 

@@ -118,7 +118,7 @@ const TextEncoding::CharacterMap& UTF32Encoding::characterMap() const
 int UTF32Encoding::convert(const unsigned char* bytes) const
 {
 UInt32 uc;
-unsigned char* p = (unsigned char*) &uc;
+unsigned char* p = reinterpret_cast<unsigned char*>(&uc);
 *p++ = *bytes++;
 *p++ = *bytes++;
 *p++ = *bytes++;

@@ -129,7 +129,7 @@ int UTF32Encoding::convert(const unsigned char* bytes) const
 ByteOrder::flipBytes(uc);
 }
 
-return uc;
+return safeToInt(uc);
 }
 
 

@@ -138,7 +138,7 @@ int UTF32Encoding::convert(int ch, unsigned char* bytes, int length) const
 if (bytes && length >= 4)
 {
 UInt32 ch1 = _flipBytes ? ByteOrder::flipBytes((UInt32) ch) : (UInt32) ch;
-unsigned char* p = (unsigned char*) &ch1;
+unsigned char* p = reinterpret_cast<unsigned char*>(&ch1);
 *bytes++ = *p++;
 *bytes++ = *p++;
 *bytes++ = *p++;

@@ -155,14 +155,14 @@ int UTF32Encoding::queryConvert(const unsigned char* bytes, int length) const
 if (length >= 4)
 {
 UInt32 uc;
-unsigned char* p = (unsigned char*) &uc;
+unsigned char* p = reinterpret_cast<unsigned char*>(&uc);
 *p++ = *bytes++;
 *p++ = *bytes++;
 *p++ = *bytes++;
 *p++ = *bytes++;
 if (_flipBytes)
 ByteOrder::flipBytes(uc);
-return uc;
+ret = safeToInt(uc);
 }
 
 return ret;
contrib/CMakeLists.txt (1 changed line, vendored)

@@ -154,6 +154,7 @@ add_contrib (libpqxx-cmake libpqxx)
 add_contrib (libpq-cmake libpq)
 add_contrib (nuraft-cmake NuRaft)
 add_contrib (fast_float-cmake fast_float)
+add_contrib (idna-cmake idna)
 add_contrib (datasketches-cpp-cmake datasketches-cpp)
 add_contrib (incbin-cmake incbin)
 add_contrib (sqids-cpp-cmake sqids-cpp)
contrib/azure (submodule, vendored)

@@ -1 +1 @@
-Subproject commit 352ff0a61cb319ac1cc38c4058443ddf70147530
+Subproject commit 060c54dfb0abe869c065143303a9d3e9c54c29e3

@@ -8,37 +8,21 @@ endif()
 set(AZURE_DIR "${ClickHouse_SOURCE_DIR}/contrib/azure")
 set(AZURE_SDK_LIBRARY_DIR "${AZURE_DIR}/sdk")
 
-file(GLOB AZURE_SDK_CORE_SRC
+file(GLOB AZURE_SDK_SRC
 "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/*.cpp"
 "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/cryptography/*.cpp"
 "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.hpp"
 "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/http/curl/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/winhttp/*.cpp"
 "${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/io/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/private/*.hpp"
+"${AZURE_SDK_LIBRARY_DIR}/core/azure-core/src/tracing/*.cpp"
-)
-
-file(GLOB AZURE_SDK_IDENTITY_SRC
 "${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/identity/azure-identity/src/private/*.hpp"
-)
-
-file(GLOB AZURE_SDK_STORAGE_COMMON_SRC
-"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/private/*.cpp"
-)
-
-file(GLOB AZURE_SDK_STORAGE_BLOBS_SRC
 "${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/*.cpp"
-"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.hpp"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-blobs/src/private/*.cpp"
+"${AZURE_SDK_LIBRARY_DIR}/storage/azure-storage-common/src/*.cpp"
 )
 
 file(GLOB AZURE_SDK_UNIFIED_SRC
-${AZURE_SDK_CORE_SRC}
-${AZURE_SDK_IDENTITY_SRC}
-${AZURE_SDK_STORAGE_COMMON_SRC}
-${AZURE_SDK_STORAGE_BLOBS_SRC}
+${AZURE_SDK_SRC}
 )
 
 set(AZURE_SDK_INCLUDES
contrib/boringssl (submodule, vendored)

@@ -1 +1 @@
-Subproject commit 8061ac62d67953e61b793042e33baf1352e67510
+Subproject commit aa6d2f865a2eab01cf94f197e11e36b6de47b5b4
contrib/idna (new submodule, vendored)

@@ -0,0 +1 @@
+Subproject commit 3c8be01d42b75649f1ac9b697d0ef757eebfe667
contrib/idna-cmake/CMakeLists.txt (new file, 24 lines)

@@ -0,0 +1,24 @@
+option(ENABLE_IDNA "Enable idna support" ${ENABLE_LIBRARIES})
+if ((NOT ENABLE_IDNA))
+message (STATUS "Not using idna")
+return()
+endif()
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/idna")
+
+set (SRCS
+"${LIBRARY_DIR}/src/idna.cpp"
+"${LIBRARY_DIR}/src/mapping.cpp"
+"${LIBRARY_DIR}/src/mapping_tables.cpp"
+"${LIBRARY_DIR}/src/normalization.cpp"
+"${LIBRARY_DIR}/src/normalization_tables.cpp"
+"${LIBRARY_DIR}/src/punycode.cpp"
+"${LIBRARY_DIR}/src/to_ascii.cpp"
+"${LIBRARY_DIR}/src/to_unicode.cpp"
+"${LIBRARY_DIR}/src/unicode_transcoding.cpp"
+"${LIBRARY_DIR}/src/validity.cpp"
+)
+
+add_library (_idna ${SRCS})
+target_include_directories(_idna PUBLIC "${LIBRARY_DIR}/include")
+
+add_library (ch_contrib::idna ALIAS _idna)
@@ -11,7 +11,9 @@ option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during quer
 
 option (ENABLE_DWARF_PARSER "Enable support for DWARF input format (uses LLVM library)" ${ENABLE_DWARF_PARSER_DEFAULT})
 
-if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER)
+option (ENABLE_BLAKE3 "Enable BLAKE3 function" ${ENABLE_LIBRARIES})
+
+if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER AND NOT ENABLE_BLAKE3)
 message(STATUS "Not using LLVM")
 return()
 endif()

@@ -26,6 +28,23 @@ set (LLVM_LIBRARY_DIRS "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")
 # and llvm cannot be compiled with bundled libcxx and 20 standard.
 set (CMAKE_CXX_STANDARD 14)
 
+if (ARCH_AMD64)
+set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
+elseif (ARCH_AARCH64)
+set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
+elseif (ARCH_PPC64LE)
+set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
+elseif (ARCH_S390X)
+set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
+elseif (ARCH_RISCV64)
+set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
+endif ()
+
+
+if (NOT ENABLE_EMBEDDED_COMPILER AND NOT ENABLE_DWARF_PARSER)
+# Only compiling blake3
+set (REQUIRED_LLVM_LIBRARIES LLVMSupport)
+else()
 # This list was generated by listing all LLVM libraries, compiling the binary and removing all libraries while it still compiles.
 set (REQUIRED_LLVM_LIBRARIES
 LLVMExecutionEngine

@@ -61,25 +80,22 @@ set (REQUIRED_LLVM_LIBRARIES
 LLVMDemangle
 )
 
-# Skip useless "install" instructions from CMake:
-set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "")
-
 if (ARCH_AMD64)
-set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
 list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
 elseif (ARCH_AARCH64)
-set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
 list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
 elseif (ARCH_PPC64LE)
-set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
 list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
 elseif (ARCH_S390X)
-set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
 list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
 elseif (ARCH_RISCV64)
-set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
 list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
 endif ()
+endif()
+
+
+# Skip useless "install" instructions from CMake:
+set (LLVM_INSTALL_TOOLCHAIN_ONLY 1 CACHE INTERNAL "")
+
 message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}")
 
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.11.2.11"
+ARG VERSION="23.11.3.23"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 

@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.11.2.11"
+ARG VERSION="23.11.3.23"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 

@@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.11.2.11"
+ARG VERSION="23.11.3.23"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image

@@ -34,7 +34,7 @@ services:
 
 # Empty container to run proxy resolver.
 resolver:
-image: clickhouse/python-bottle
+image: clickhouse/python-bottle:${DOCKER_PYTHON_BOTTLE_TAG:-latest}
 expose:
 - "8080"
 tty: true
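For illustration only (not part of the change): with the ${DOCKER_PYTHON_BOTTLE_TAG:-latest} form, the image tag falls back to latest but can be pinned from the environment when the compose project is started, for example:

    # Hypothetical invocation pinning the python-bottle image tag via the new variable;
    # the tag value and the service name used here are just for the example.
    DOCKER_PYTHON_BOTTLE_TAG=5f3e8b9 docker compose up -d resolver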
@@ -40,6 +40,12 @@ if [ "$cache_policy" = "SLRU" ]; then
 mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
 fi
 
+if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+# It is not needed, we will explicitly create tables on s3.
+# We do not have statefull tests with s3 storage run in public repository, but this is needed for another repository.
+rm /etc/clickhouse-server/config.d/s3_storage_policy_for_merge_tree_by_default.xml
+fi
+
 function start()
 {
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -123,8 +129,76 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
 else
 clickhouse-client --query "CREATE DATABASE test"
 clickhouse-client --query "SHOW TABLES FROM test"
+if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+clickhouse-client --query "CREATE TABLE test.hits (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16,
+EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32,
+UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String,
+RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16),
+URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8,
+FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16,
+UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8,
+MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16,
+SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16,
+ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32,
+SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8,
+FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8,
+HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8,
+GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32,
+HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String,
+HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32,
+FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32,
+LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32,
+RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String,
+ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String,
+OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String,
+UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64,
+URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String,
+ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
+IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate)
+ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+clickhouse-client --query "CREATE TABLE test.visits (CounterID UInt32, StartDate Date, Sign Int8, IsNew UInt8,
+VisitID UInt64, UserID UInt64, StartTime DateTime, Duration UInt32, UTCStartTime DateTime, PageViews Int32,
+Hits Int32, IsBounce UInt8, Referer String, StartURL String, RefererDomain String, StartURLDomain String,
+EndURL String, LinkURL String, IsDownload UInt8, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String,
+AdvEngineID UInt8, PlaceID Int32, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32),
+RefererRegions Array(UInt32), IsYandex UInt8, GoalReachesDepth Int32, GoalReachesURL Int32, GoalReachesAny Int32,
+SocialSourceNetworkID UInt8, SocialSourcePage String, MobilePhoneModel String, ClientEventTime DateTime, RegionID UInt32,
+ClientIP UInt32, ClientIP6 FixedString(16), RemoteIP UInt32, RemoteIP6 FixedString(16), IPNetworkID UInt32,
+SilverlightVersion3 UInt32, CodeVersion UInt32, ResolutionWidth UInt16, ResolutionHeight UInt16, UserAgentMajor UInt16,
+UserAgentMinor UInt16, WindowClientWidth UInt16, WindowClientHeight UInt16, SilverlightVersion2 UInt8, SilverlightVersion4 UInt16,
+FlashVersion3 UInt16, FlashVersion4 UInt16, ClientTimeZone Int16, OS UInt8, UserAgent UInt8, ResolutionDepth UInt8,
+FlashMajor UInt8, FlashMinor UInt8, NetMajor UInt8, NetMinor UInt8, MobilePhone UInt8, SilverlightVersion1 UInt8,
+Age UInt8, Sex UInt8, Income UInt8, JavaEnable UInt8, CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8,
+BrowserLanguage UInt16, BrowserCountry UInt16, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16),
+Params Array(String), Goals Nested(ID UInt32, Serial UInt32, EventTime DateTime, Price Int64, OrderID String, CurrencyID UInt32),
+WatchIDs Array(UInt64), ParamSumPrice Int64, ParamCurrency FixedString(3), ParamCurrencyID UInt16, ClickLogID UInt64,
+ClickEventID Int32, ClickGoodEvent Int32, ClickEventTime DateTime, ClickPriorityID Int32, ClickPhraseID Int32, ClickPageID Int32,
+ClickPlaceID Int32, ClickTypeID Int32, ClickResourceID Int32, ClickCost UInt32, ClickClientIP UInt32, ClickDomainID UInt32,
+ClickURL String, ClickAttempt UInt8, ClickOrderID UInt32, ClickBannerID UInt32, ClickMarketCategoryID UInt32, ClickMarketPP UInt32,
+ClickMarketCategoryName String, ClickMarketPPName String, ClickAWAPSCampaignName String, ClickPageName String, ClickTargetType UInt16,
+ClickTargetPhraseID UInt64, ClickContextType UInt8, ClickSelectType Int8, ClickOptions String, ClickGroupBannerID Int32,
+OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String,
+UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, FirstVisit DateTime,
+PredLastVisit Date, LastVisit Date, TotalVisits UInt32, TraficSource Nested(ID Int8, SearchEngineID UInt16, AdvEngineID UInt8,
+PlaceID UInt16, SocialSourceNetworkID UInt8, Domain String, SearchPhrase String, SocialSourcePage String), Attendance FixedString(16),
+CLID UInt32, YCLID UInt64, NormalizedRefererHash UInt64, SearchPhraseHash UInt64, RefererDomainHash UInt64, NormalizedStartURLHash UInt64,
+StartURLDomainHash UInt64, NormalizedEndURLHash UInt64, TopLevelDomain UInt64, URLScheme UInt64, OpenstatServiceNameHash UInt64,
+OpenstatCampaignIDHash UInt64, OpenstatAdIDHash UInt64, OpenstatSourceIDHash UInt64, UTMSourceHash UInt64, UTMMediumHash UInt64,
+UTMCampaignHash UInt64, UTMContentHash UInt64, UTMTermHash UInt64, FromHash UInt64, WebVisorEnabled UInt8, WebVisorActivity UInt32,
+ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64),
+Market Nested(Type UInt8, GoalID UInt32, OrderID String, OrderPrice Int64, PP UInt32, DirectPlaceID UInt32, DirectOrderID UInt32,
+DirectBannerID UInt32, GoodID String, GoodName String, GoodQuantity Int32, GoodPrice Int64), IslandID FixedString(16))
+ENGINE = CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
+SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
+
+clickhouse-client --query "INSERT INTO test.hits SELECT * FROM datasets.hits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "INSERT INTO test.visits SELECT * FROM datasets.visits_v1 SETTINGS enable_filesystem_cache_on_write_operations=0"
+clickhouse-client --query "DROP TABLE datasets.visits_v1 SYNC"
+clickhouse-client --query "DROP TABLE datasets.hits_v1 SYNC"
+else
 clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
 clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
+fi
clickhouse-client --query "CREATE TABLE test.hits_s3 (WatchID UInt64, JavaEnable UInt8, Title String, GoodEvent Int16, EventTime DateTime, EventDate Date, CounterID UInt32, ClientIP UInt32, ClientIP6 FixedString(16), RegionID UInt32, UserID UInt64, CounterClass Int8, OS UInt8, UserAgent UInt8, URL String, Referer String, URLDomain String, RefererDomain String, Refresh UInt8, IsRobot UInt8, RefererCategories Array(UInt16), URLCategories Array(UInt16), URLRegions Array(UInt32), RefererRegions Array(UInt32), ResolutionWidth UInt16, ResolutionHeight UInt16, ResolutionDepth UInt8, FlashMajor UInt8, FlashMinor UInt8, FlashMinor2 String, NetMajor UInt8, NetMinor UInt8, UserAgentMajor UInt16, UserAgentMinor FixedString(2), CookieEnable UInt8, JavascriptEnable UInt8, IsMobile UInt8, MobilePhone UInt8, MobilePhoneModel String, Params String, IPNetworkID UInt32, TraficSourceID Int8, SearchEngineID UInt16, SearchPhrase String, AdvEngineID UInt8, IsArtifical UInt8, WindowClientWidth UInt16, WindowClientHeight UInt16, ClientTimeZone Int16, ClientEventTime DateTime, SilverlightVersion1 UInt8, SilverlightVersion2 UInt8, SilverlightVersion3 UInt32, SilverlightVersion4 UInt16, PageCharset String, CodeVersion UInt32, IsLink UInt8, IsDownload UInt8, IsNotBounce UInt8, FUniqID UInt64, HID UInt32, IsOldCounter UInt8, IsEvent UInt8, IsParameter UInt8, DontCountHits UInt8, WithHash UInt8, HitColor FixedString(1), UTCEventTime DateTime, Age UInt8, Sex UInt8, Income UInt8, Interests UInt16, Robotness UInt8, GeneralInterests Array(UInt16), RemoteIP UInt32, RemoteIP6 FixedString(16), WindowName Int32, OpenerName Int32, HistoryLength Int16, BrowserLanguage FixedString(2), BrowserCountry FixedString(2), SocialNetwork String, SocialAction String, HTTPError UInt16, SendTiming Int32, DNSTiming Int32, ConnectTiming Int32, ResponseStartTiming Int32, ResponseEndTiming Int32, FetchTiming Int32, RedirectTiming Int32, DOMInteractiveTiming Int32, DOMContentLoadedTiming Int32, DOMCompleteTiming Int32, LoadEventStartTiming Int32, LoadEventEndTiming Int32, NSToDOMContentLoadedTiming Int32, FirstPaintTiming Int32, RedirectCount Int8, SocialSourceNetworkID UInt8, SocialSourcePage String, ParamPrice Int64, ParamOrderID String, ParamCurrency FixedString(3), ParamCurrencyID UInt16, GoalsReached Array(UInt32), OpenstatServiceName String, OpenstatCampaignID String, OpenstatAdID String, OpenstatSourceID String, UTMSource String, UTMMedium String, UTMCampaign String, UTMContent String, UTMTerm String, FromTag String, HasGCLID UInt8, RefererHash UInt64, URLHash UInt64, CLID UInt32, YCLID UInt64, ShareService String, ShareURL String, ShareTitle String, ParsedParams Nested(Key1 String, Key2 String, Key3 String, Key4 String, Key5 String, ValueDouble Float64), IslandID FixedString(16), RequestNum UInt32, RequestTry UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192, storage_policy='s3_cache'"
 clickhouse-client --query "INSERT INTO test.hits_s3 SELECT * FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0"
 fi
@@ -144,6 +218,10 @@ function run_tests()
 ADDITIONAL_OPTIONS+=('--replicated-database')
 fi
 
+if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+ADDITIONAL_OPTIONS+=('--s3-storage')
+fi
+
 if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then
 ADDITIONAL_OPTIONS+=('--db-engine=Ordinary')
 fi
@@ -58,6 +58,7 @@ if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; th
 
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/users.d/s3_cache_new.xml
+rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml
 fi
 
 # For flaky check we also enable thread fuzzer

@@ -216,11 +217,11 @@ export -f run_tests
 if [ "$NUM_TRIES" -gt "1" ]; then
 # We don't run tests with Ordinary database in PRs, only in master.
 # So run new/changed tests with Ordinary at least once in flaky check.
-timeout "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
+timeout_with_logging "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
 | sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
 fi
 
-timeout "$MAX_RUN_TIME" bash -c run_tests ||:
+timeout_with_logging "$MAX_RUN_TIME" bash -c run_tests ||:
 
 echo "Files in current directory"
 ls -la ./
@@ -35,4 +35,17 @@ function fn_exists() {
 declare -F "$1" > /dev/null;
 }
 
+function timeout_with_logging() {
+local exit_code=0
+
+timeout "${@}" || exit_code="${?}"
+
+if [[ "${exit_code}" -eq "124" ]]
+then
+echo "The command 'timeout ${*}' has been killed by timeout"
+fi
+
+return $exit_code
+}
+
 # vi: ft=bash
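For illustration only (not part of the diff): the helper above behaves like timeout but additionally logs when the wrapped command was killed (exit code 124) while still returning that code to the caller. A minimal sketch, assuming the library file has been sourced first:

    # The sourcing path is an assumption; the file name is not shown in the rendered diff.
    source ./utils.lib
    # "sleep 60" is killed after 5 seconds; the helper prints the kill message
    # and returns 124, which the caller can still inspect.
    timeout_with_logging 5 sleep 60 || echo "exit code: $?"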
docs/changelogs/v23.11.3.23-stable.md (new file, 26 lines)

@@ -0,0 +1,26 @@
+---
+sidebar_position: 1
+sidebar_label: 2023
+---
+
+# 2023 Changelog
+
+### ClickHouse release v23.11.3.23-stable (a14ab450b0e) FIXME as compared to v23.11.2.11-stable (6e5411358c8)
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Fix invalid memory access in BLAKE3 (Rust) [#57876](https://github.com/ClickHouse/ClickHouse/pull/57876) ([Raúl Marín](https://github.com/Algunenano)).
+* Normalize function names in CREATE INDEX [#57906](https://github.com/ClickHouse/ClickHouse/pull/57906) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix handling of unavailable replicas before first request happened [#57933](https://github.com/ClickHouse/ClickHouse/pull/57933) ([Nikita Taranov](https://github.com/nickitat)).
+* Revert "Fix bug window functions: revert [#39631](https://github.com/ClickHouse/ClickHouse/issues/39631)" [#58031](https://github.com/ClickHouse/ClickHouse/pull/58031) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+
+#### NO CL CATEGORY
+
+* Backported in [#57918](https://github.com/ClickHouse/ClickHouse/issues/57918):. [#57909](https://github.com/ClickHouse/ClickHouse/pull/57909) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Remove heavy rust stable toolchain [#57905](https://github.com/ClickHouse/ClickHouse/pull/57905) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix docker image for integration tests (fixes CI) [#57952](https://github.com/ClickHouse/ClickHouse/pull/57952) ([Azat Khuzhin](https://github.com/azat)).
+* Always use `pread` for reading cache segments [#57970](https://github.com/ClickHouse/ClickHouse/pull/57970) ([Nikita Taranov](https://github.com/nickitat)).
@@ -67,7 +67,6 @@ Engines in the family:
 Engines in the family:
 
 - [Distributed](../../engines/table-engines/special/distributed.md#distributed)
-- [MaterializedView](../../engines/table-engines/special/materializedview.md#materializedview)
 - [Dictionary](../../engines/table-engines/special/dictionary.md#dictionary)
 - [Merge](../../engines/table-engines/special/merge.md#merge)
 - [File](../../engines/table-engines/special/file.md#file)
@@ -212,5 +212,5 @@ ORDER BY key ASC
 ```
 
 ### More information on Joins
-- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#settings-join_algorithm)
+- [`join_algorithm` setting](/docs/en/operations/settings/settings.md#join_algorithm)
 - [JOIN clause](/docs/en/sql-reference/statements/select/join.md)
@@ -236,7 +236,7 @@ libhdfs3 support HDFS namenode HA.
 
 ## Storage Settings {#storage-settings}
 
-- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
 - [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
 
@@ -54,7 +54,7 @@ Optional parameters:
 
 - `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object.
 - `kafka_num_consumers` — The number of consumers per table. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition, and must not be greater than the number of physical cores on the server where ClickHouse is deployed. Default: `1`.
-- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
+- `kafka_max_block_size` — The maximum batch size (in messages) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
 - `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). Default: `0`.
 - `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block. Default: `0`.
 - `kafka_client_id` — Client identifier. Empty by default.
@@ -151,7 +151,7 @@ Example:
 
 SELECT level, sum(total) FROM daily GROUP BY level;
 ```
-To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#settings-max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
+To improve performance, received messages are grouped into blocks the size of [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). If the block wasn’t formed within [stream_flush_interval_ms](../../../operations/settings/settings.md/#stream-flush-interval-ms) milliseconds, the data will be flushed to the table regardless of the completeness of the block.
 
 To stop receiving topic data or to change the conversion logic, detach the materialized view:
 
@@ -58,7 +58,7 @@ Optional parameters:
 - `nats_reconnect_wait` – Amount of time in milliseconds to sleep between each reconnect attempt. Default: `5000`.
 - `nats_server_list` - Server list for connection. Can be specified to connect to NATS cluster.
 - `nats_skip_broken_messages` - NATS message parser tolerance to schema-incompatible messages per block. Default: `0`. If `nats_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data).
-- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
+- `nats_max_block_size` - Number of row collected by poll(s) for flushing data from NATS. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
 - `nats_flush_interval_ms` - Timeout for flushing data read from NATS. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
 - `nats_username` - NATS username.
 - `nats_password` - NATS password.
@ -65,7 +65,7 @@ Optional parameters:
|
|||||||
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
|
- `rabbitmq_deadletter_exchange` - Specify name for a [dead letter exchange](https://www.rabbitmq.com/dlx.html). You can create another table with this exchange name and collect messages in cases when they are republished to dead letter exchange. By default dead letter exchange is not specified.
|
||||||
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
|
- `rabbitmq_persistent` - If set to 1 (true), in insert query delivery mode will be set to 2 (marks messages as 'persistent'). Default: `0`.
|
||||||
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`.
|
- `rabbitmq_skip_broken_messages` – RabbitMQ message parser tolerance to schema-incompatible messages per block. If `rabbitmq_skip_broken_messages = N` then the engine skips *N* RabbitMQ messages that cannot be parsed (a message equals a row of data). Default: `0`.
|
||||||
- `rabbitmq_max_block_size` - Number of rows collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size). See the example after this list.
|
- `rabbitmq_max_block_size` - Number of rows collected before flushing data from RabbitMQ. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size). See the example after this list.
|
||||||
- `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
|
- `rabbitmq_flush_interval_ms` - Timeout for flushing data from RabbitMQ. Default: [stream_flush_interval_ms](../../../operations/settings/settings.md#stream-flush-interval-ms).
|
||||||
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
|
- `rabbitmq_queue_settings_list` - allows to set RabbitMQ settings when creating a queue. Available settings: `x-max-length`, `x-max-length-bytes`, `x-message-ttl`, `x-expires`, `x-priority`, `x-max-priority`, `x-overflow`, `x-dead-letter-exchange`, `x-queue-type`. The `durable` setting is enabled automatically for the queue.
|
||||||
- `rabbitmq_address` - Address for connection. Use either this setting or `rabbitmq_host_port`.
|
- `rabbitmq_address` - Address for connection. Use either this setting or `rabbitmq_host_port`.
|
||||||
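A hedged sketch of a table using several of these parameters, assuming a local RabbitMQ broker and a hypothetical exchange `exchange1`:

```sql
CREATE TABLE rabbitmq_queue (key UInt64, value String)
ENGINE = RabbitMQ
SETTINGS rabbitmq_host_port = 'localhost:5672',
         rabbitmq_exchange_name = 'exchange1',
         rabbitmq_format = 'JSONEachRow',
         rabbitmq_skip_broken_messages = 10,                   -- tolerate unparsable messages
         rabbitmq_queue_settings_list = 'x-queue-type=quorum'; -- applied when the queue is created
```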
|
@ -222,7 +222,7 @@ CREATE TABLE table_with_asterisk (name String, value UInt32)
|
|||||||
|
|
||||||
## Storage Settings {#storage-settings}
|
## Storage Settings {#storage-settings}
|
||||||
|
|
||||||
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
|
- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
|
||||||
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
- [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
||||||
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
- [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
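A minimal sketch of one of these settings in use, assuming a hypothetical S3-backed table `s3_table`:

```sql
-- Replace the target file contents instead of failing when the file already exists.
SET s3_truncate_on_insert = 1;
INSERT INTO s3_table VALUES (1, 'overwrites the previous file contents');
```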
|
|
||||||
|
@ -12,7 +12,7 @@ In most cases you do not need a partition key, and in most other cases you do no
|
|||||||
You should never use partitioning that is too granular. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression.
|
You should never use partitioning that is too granular. Don't partition your data by client identifiers or names. Instead, make a client identifier or name the first column in the ORDER BY expression.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
Partitioning is available for the [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) family tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables). [Materialized views](../../../engines/table-engines/special/materializedview.md#materializedview) based on MergeTree tables support partitioning, as well.
|
Partitioning is available for the [MergeTree family tables](../../../engines/table-engines/mergetree-family/mergetree.md), including [replicated tables](../../../engines/table-engines/mergetree-family/replication.md) and [materialized views](../../../sql-reference/statements/create/view.md#materialized-view).
|
||||||
|
|
||||||
A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition.
|
A partition is a logical combination of records in a table by a specified criterion. You can set a partition by an arbitrary criterion, such as by month, by day, or by event type. Each partition is stored separately to simplify manipulations of this data. When accessing the data, ClickHouse uses the smallest subset of partitions possible. Partitions improve performance for queries containing a partitioning key because ClickHouse will filter for that partition before selecting the parts and granules within the partition.
|
||||||
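For example, a sketch of monthly partitioning (table and column names are illustrative):

```sql
CREATE TABLE visits
(
    EventDate Date,
    UserID UInt64,
    Duration UInt32
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(EventDate)   -- one partition per month
ORDER BY (UserID, EventDate);

-- Queries filtered on the partitioning key read only the matching partitions.
SELECT count() FROM visits WHERE EventDate BETWEEN '2024-01-01' AND '2024-01-31';
```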
|
|
||||||
|
@ -112,7 +112,7 @@ Specifying the `sharding_key` is necessary for the following:
|
|||||||
For **Insert limit settings** (`..._insert`) see also:
|
For **Insert limit settings** (`..._insert`) see also:
|
||||||
|
|
||||||
- [distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) setting
|
- [distributed_foreground_insert](../../../operations/settings/settings.md#distributed_foreground_insert) setting
|
||||||
- [prefer_localhost_replica](../../../operations/settings/settings.md#settings-prefer-localhost-replica) setting
|
- [prefer_localhost_replica](../../../operations/settings/settings.md#prefer-localhost-replica) setting
|
||||||
- `bytes_to_throw_insert` is handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
|
- `bytes_to_throw_insert` is handled before `bytes_to_delay_insert`, so you should not set it to a value less than `bytes_to_delay_insert`
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -198,7 +198,7 @@ The parameters `host`, `port`, and optionally `user`, `password`, `secure`, `com
|
|||||||
- `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and be configured with correct certificates.
|
- `secure` - Whether to use a secure SSL/TLS connection. Usually also requires specifying the port (the default secure port is `9440`). The server should listen on `<tcp_port_secure>9440</tcp_port_secure>` and be configured with correct certificates.
|
||||||
- `compression` - Use data compression. Default value: `true`.
|
- `compression` - Use data compression. Default value: `true`.
|
||||||
|
|
||||||
When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#settings-load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly.
|
When specifying replicas, one of the available replicas will be selected for each of the shards when reading. You can configure the algorithm for load balancing (the preference for which replica to access) – see the [load_balancing](../../../operations/settings/settings.md#load_balancing) setting. If the connection with the server is not established, there will be an attempt to connect with a short timeout. If the connection failed, the next replica will be selected, and so on for all the replicas. If the connection attempt failed for all the replicas, the attempt will be repeated the same way, several times. This works in favour of resiliency, but does not provide complete fault tolerance: a remote server might accept the connection, but might not work, or work poorly.
|
||||||
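A hedged sketch of a `Distributed` table over a hypothetical cluster `my_cluster`; reads pick one replica per shard according to `load_balancing`:

```sql
CREATE TABLE hits_all AS hits_local
ENGINE = Distributed(my_cluster, default, hits_local, rand());
```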
|
|
||||||
You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard.
|
You can specify just one of the shards (in this case, query processing should be called remote, rather than distributed) or up to any number of shards. In each shard, you can specify from one to any number of replicas. You can specify a different number of replicas for each shard.
|
||||||
|
|
||||||
@ -243,7 +243,7 @@ If the server ceased to exist or had a rough restart (for example, due to a hard
|
|||||||
|
|
||||||
When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
When querying a `Distributed` table, `SELECT` queries are sent to all shards and work regardless of how data is distributed across the shards (they can be distributed completely randomly). When you add a new shard, you do not have to transfer old data into it. Instead, you can write new data to it by using a heavier weight – the data will be distributed slightly unevenly, but queries will work correctly and efficiently.
|
||||||
|
|
||||||
When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#settings-max_parallel_replicas).
|
When the `max_parallel_replicas` option is enabled, query processing is parallelized across all replicas within a single shard. For more information, see the section [max_parallel_replicas](../../../operations/settings/settings.md#max_parallel_replicas).
|
||||||
|
|
||||||
To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.
|
To learn more about how distributed `in` and `global in` queries are processed, refer to [this](../../../sql-reference/operators/in.md#select-distributed-subqueries) documentation.
|
||||||
|
|
||||||
|
@ -101,8 +101,8 @@ For partitioning by month, use the `toYYYYMM(date_column)` expression, where `da
|
|||||||
|
|
||||||
## Settings {#settings}
|
## Settings {#settings}
|
||||||
|
|
||||||
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
|
- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
|
||||||
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
|
- [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
|
||||||
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
- [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
|
||||||
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
- [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
|
||||||
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
|
- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - method of reading data from storage file, one of: `read`, `pread`, `mmap`. The mmap method does not apply to clickhouse-server (it's intended for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
|
||||||
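A minimal sketch of one of these settings in use, with a hypothetical `File`-engine table `file_table`:

```sql
SET engine_file_empty_if_not_exists = 1;
SELECT * FROM file_table;  -- returns an empty result instead of an error if the file is missing
```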
|
@ -41,7 +41,7 @@ Optional parameters:
|
|||||||
|
|
||||||
- `poll_timeout_ms` - Timeout for single poll from log file. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms).
|
- `poll_timeout_ms` - Timeout for single poll from log file. Default: [stream_poll_timeout_ms](../../../operations/settings/settings.md#stream_poll_timeout_ms).
|
||||||
- `poll_max_batch_size` — Maximum number of records to be polled in a single poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
|
- `poll_max_batch_size` — Maximum number of records to be polled in a single poll. Default: [max_block_size](../../../operations/settings/settings.md#setting-max_block_size).
|
||||||
- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#setting-max_insert_block_size).
|
- `max_block_size` — The maximum batch size (in records) for poll. Default: [max_insert_block_size](../../../operations/settings/settings.md#max_insert_block_size).
|
||||||
- `max_threads` - Maximum number of threads for parsing files. Default: `0`, which means max(1, physical_cpu_cores / 4). See the example after this list.
|
- `max_threads` - Maximum number of threads for parsing files. Default: `0`, which means max(1, physical_cpu_cores / 4). See the example after this list.
|
||||||
- `poll_directory_watch_events_backoff_init` - The initial sleep value for watch directory thread. Default: `500`.
|
- `poll_directory_watch_events_backoff_init` - The initial sleep value for watch directory thread. Default: `500`.
|
||||||
- `poll_directory_watch_events_backoff_max` - The max sleep value for watch directory thread. Default: `32000`.
|
- `poll_directory_watch_events_backoff_max` - The max sleep value for watch directory thread. Default: `32000`.
|
||||||
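A hedged sketch of a `FileLog` table combining several of the parameters above (the path and column names are hypothetical):

```sql
CREATE TABLE app_logs (ts DateTime, message String)
ENGINE = FileLog('/var/lib/clickhouse/user_files/app_logs/', 'JSONEachRow')
SETTINGS poll_timeout_ms = 500,
         poll_max_batch_size = 1024;
```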
|
@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /en/engines/table-engines/special/materializedview
|
|
||||||
sidebar_position: 100
|
|
||||||
sidebar_label: MaterializedView
|
|
||||||
---
|
|
||||||
|
|
||||||
# MaterializedView Table Engine
|
|
||||||
|
|
||||||
Used for implementing materialized views (for more information, see [CREATE VIEW](../../../sql-reference/statements/create/view.md#materialized)). For storing data, it uses a different engine that was specified when creating the view. When reading from a table, it just uses that engine.
|
|
@ -167,7 +167,7 @@ For successful requests that do not return a data table, an empty response body
|
|||||||
|
|
||||||
You can use compression to reduce network traffic when transmitting a large amount of data or for creating dumps that are immediately compressed.
|
You can use compression to reduce network traffic when transmitting a large amount of data or for creating dumps that are immediately compressed.
|
||||||
|
|
||||||
You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting.
|
You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need `clickhouse-compressor` program to work with it. It is installed with the `clickhouse-client` package. To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#http_native_compression_disable_checksumming_on_decompress) setting.
|
||||||
|
|
||||||
If you specify `compress=1` in the URL, the server will compress the data it sends to you. If you specify `decompress=1` in the URL, the server will decompress the data which you pass in the `POST` method.
|
If you specify `compress=1` in the URL, the server will compress the data it sends to you. If you specify `decompress=1` in the URL, the server will decompress the data which you pass in the `POST` method.
|
||||||
|
|
||||||
@ -183,7 +183,7 @@ You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP
|
|||||||
- `snappy`
|
- `snappy`
|
||||||
|
|
||||||
To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`.
|
To send a compressed `POST` request, append the request header `Content-Encoding: compression_method`.
|
||||||
In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#settings-http_zlib_compression_level) setting for all compression methods.
|
In order for ClickHouse to compress the response, enable compression with [enable_http_compression](../operations/settings/settings.md#enable_http_compression) setting and append `Accept-Encoding: compression_method` header to the request. You can configure the data compression level in the [http_zlib_compression_level](../operations/settings/settings.md#http_zlib_compression_level) setting for all compression methods.
|
||||||
|
|
||||||
:::info
|
:::info
|
||||||
Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly.
|
Some HTTP clients might decompress data from the server by default (with `gzip` and `deflate`) and you might get decompressed data even if you use the compression settings correctly.
|
||||||
@ -285,7 +285,7 @@ For information about other parameters, see the section “SET”.
|
|||||||
|
|
||||||
Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session.
|
Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, you need to add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session.
|
||||||
|
|
||||||
You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example of the header sequence:
|
You can receive information about the progress of a query in `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#send_progress_in_http_headers). Example of the header sequence:
|
||||||
|
|
||||||
``` text
|
``` text
|
||||||
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","elapsed_ns":"662334"}
|
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128","elapsed_ns":"662334"}
|
||||||
@ -496,7 +496,7 @@ Next are the configuration methods for different `type`.
|
|||||||
|
|
||||||
`query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. This configuration is required.
|
`query` value is a predefined query of `predefined_query_handler`, which is executed by ClickHouse when an HTTP request is matched and the result of the query is returned. This configuration is required.
|
||||||
|
|
||||||
The following example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully.
|
The following example defines the values of [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` settings, then queries the system table to check whether these settings were set successfully.
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
To keep the default `handlers` such as` query`, `play`,` ping`, add the `<defaults/>` rule.
|
To keep the default `handlers` such as` query`, `play`,` ping`, add the `<defaults/>` rule.
|
||||||
@ -539,7 +539,7 @@ In `dynamic_query_handler`, the query is written in the form of parameter of the
|
|||||||
|
|
||||||
ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.
|
ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.
|
||||||
|
|
||||||
To experiment with this functionality, the example defines the values of the [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_final_threads` settings and queries whether the settings were set successfully.
|
To experiment with this functionality, the example defines the values of the [max_threads](../operations/settings/settings.md#max_threads) and `max_final_threads` settings and queries whether the settings were set successfully.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
@ -25,6 +25,7 @@ ClickHouse server provides embedded visual interfaces for power users:
|
|||||||
|
|
||||||
- Play UI: open `/play` in the browser;
|
- Play UI: open `/play` in the browser;
|
||||||
- Advanced Dashboard: open `/dashboard` in the browser;
|
- Advanced Dashboard: open `/dashboard` in the browser;
|
||||||
|
- Binary symbols viewer for ClickHouse engineers: open `/binary` in the browser;
|
||||||
|
|
||||||
There is also a wide range of third-party libraries for working with ClickHouse:
|
There is also a wide range of third-party libraries for working with ClickHouse:
|
||||||
|
|
||||||
|
@ -64,4 +64,4 @@ You can configure ClickHouse to export metrics to [Prometheus](https://prometheu
|
|||||||
|
|
||||||
Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`.
|
Additionally, you can monitor server availability through the HTTP API. Send the `HTTP GET` request to `/ping`. If the server is available, it responds with `200 OK`.
|
||||||
|
|
||||||
To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#settings-max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.
|
To monitor servers in a cluster configuration, you should set the [max_replica_delay_for_distributed_queries](../operations/settings/settings.md#max_replica_delay_for_distributed_queries) parameter and use the HTTP resource `/replicas_status`. A request to `/replicas_status` returns `200 OK` if the replica is available and is not delayed behind the other replicas. If a replica is delayed, it returns `503 HTTP_SERVICE_UNAVAILABLE` with information about the gap.
|
||||||
|
@ -42,7 +42,7 @@ To analyze the `trace_log` system table:
|
|||||||
|
|
||||||
- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting-started/install.md#install-from-deb-packages).
|
- Install the `clickhouse-common-static-dbg` package. See [Install from DEB Packages](../../getting-started/install.md#install-from-deb-packages).
|
||||||
|
|
||||||
- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting.
|
- Allow introspection functions by the [allow_introspection_functions](../../operations/settings/settings.md#allow_introspection_functions) setting.
|
||||||
|
|
||||||
For security reasons, introspection functions are disabled by default.
|
For security reasons, introspection functions are disabled by default.
|
||||||
|
|
||||||
|
@ -29,6 +29,10 @@ Transactionally inconsistent caching is traditionally provided by client tools o
|
|||||||
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
|
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
|
||||||
This reduces maintenance effort and avoids redundancy.
|
This reduces maintenance effort and avoids redundancy.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
Security consideration: The cached query result is tied to the user executing it. Authorization checks are performed when the query is executed. This means that if there are any alterations to the user's role or permissions between the time the query is cached and when the cache is accessed, the result will not reflect these changes. We recommend using different users to distinguish between different levels of access, instead of actively toggling roles for a single user between queries, as this practice may lead to unexpected query results.
|
||||||
|
:::
|
||||||
|
|
||||||
## Configuration Settings and Usage
|
## Configuration Settings and Usage
|
||||||
|
|
||||||
Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
|
Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
|
||||||
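A minimal sketch of applying the `use_query_cache` setting mentioned above to a single query (table and column names are hypothetical):

```sql
SELECT department, avg(salary)
FROM employees
GROUP BY department
SETTINGS use_query_cache = true;  -- the next identical run can be served from the cache
```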
@ -99,7 +103,7 @@ It is also possible to limit the cache usage of individual users using [settings
|
|||||||
constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may
|
constraints](settings/constraints-on-settings.md). More specifically, you can restrict the maximum amount of memory (in bytes) a user may
|
||||||
allocate in the query cache and the maximum number of stored query results. For that, first provide configurations
|
allocate in the query cache and the maximum number of stored query results. For that, first provide configurations
|
||||||
[query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and
|
[query_cache_max_size_in_bytes](settings/settings.md#query-cache-max-size-in-bytes) and
|
||||||
[query_cache_max_entries](settings/settings.md#query-cache-size-max-entries) in a user profile in `users.xml`, then make both settings
|
[query_cache_max_entries](settings/settings.md#query-cache-max-entries) in a user profile in `users.xml`, then make both settings
|
||||||
readonly:
|
readonly:
|
||||||
|
|
||||||
``` xml
|
``` xml
|
||||||
@ -140,7 +144,7 @@ value can be specified at session, profile or query level using setting [query_c
|
|||||||
Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
|
Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
|
||||||
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).
|
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).
|
||||||
|
|
||||||
ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#settings-max_block_size) rows. Due to filtering, aggregation,
|
ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation,
|
||||||
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
|
etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting
|
||||||
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
|
[query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks
|
||||||
are squashed (if they are tiny) or split (if they are large) into blocks of 'max_block_size' size before insertion into the query result
|
are squashed (if they are tiny) or split (if they are large) into blocks of 'max_block_size' size before insertion into the query result
|
||||||
|
@ -2009,7 +2009,7 @@ Data for the query cache is allocated in DRAM. If memory is scarce, make sure to
|
|||||||
|
|
||||||
## query_thread_log {#query_thread_log}
|
## query_thread_log {#query_thread_log}
|
||||||
|
|
||||||
Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting.
|
Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#log-query-threads) setting.
|
||||||
|
|
||||||
Queries are logged in the [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
|
Queries are logged in the [system.query_thread_log](../../operations/system-tables/query_thread_log.md#system_tables-query_thread_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
|
||||||
|
|
||||||
@ -2051,7 +2051,7 @@ If the table does not exist, ClickHouse will create it. If the structure of the
|
|||||||
|
|
||||||
## query_views_log {#query_views_log}
|
## query_views_log {#query_views_log}
|
||||||
|
|
||||||
Setting for logging views (live, materialized etc) dependant of queries received with the [log_query_views=1](../../operations/settings/settings.md#settings-log-query-views) setting.
|
Setting for logging views (live, materialized etc) dependant of queries received with the [log_query_views=1](../../operations/settings/settings.md#log-query-views) setting.
|
||||||
|
|
||||||
Queries are logged in the [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
|
Queries are logged in the [system.query_views_log](../../operations/system-tables/query_views_log.md#system_tables-query_views_log) table, not in a separate file. You can change the name of the table in the `table` parameter (see below).
|
||||||
|
|
||||||
@ -2331,7 +2331,7 @@ For the value of the `incl` attribute, see the section “[Configuration files](
|
|||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [skip_unavailable_shards](../../operations/settings/settings.md#settings-skip_unavailable_shards)
|
- [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards)
|
||||||
- [Cluster Discovery](../../operations/cluster-discovery.md)
|
- [Cluster Discovery](../../operations/cluster-discovery.md)
|
||||||
- [Replicated database engine](../../engines/database-engines/replicated.md)
|
- [Replicated database engine](../../engines/database-engines/replicated.md)
|
||||||
|
|
||||||
|
@ -139,7 +139,7 @@ Limit on the number of bytes in the result. The same as the previous setting.
|
|||||||
|
|
||||||
What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
|
What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
|
||||||
|
|
||||||
Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that the number of returned rows can be greater than [max_result_rows](#setting-max_result_rows), is a multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size), and depends on [max_threads](../../operations/settings/settings.md#settings-max_threads).
|
Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that the number of returned rows can be greater than [max_result_rows](#setting-max_result_rows), is a multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size), and depends on [max_threads](../../operations/settings/settings.md#max_threads).
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
@ -460,6 +460,12 @@ Possible values:
|
|||||||
|
|
||||||
Default value: 1048576.
|
Default value: 1048576.
|
||||||
|
|
||||||
|
## http_make_head_request {#http-make-head-request}
|
||||||
|
|
||||||
|
The `http_make_head_request` setting allows the execution of a `HEAD` request while reading data from HTTP to retrieve information about the file to be read, such as its size. Since it's enabled by default, it may be desirable to disable this setting in cases where the server does not support `HEAD` requests.
|
||||||
|
|
||||||
|
Default value: `true`.
|
||||||
|
|
||||||
## table_function_remote_max_addresses {#table_function_remote_max_addresses}
|
## table_function_remote_max_addresses {#table_function_remote_max_addresses}
|
||||||
|
|
||||||
Sets the maximum number of addresses generated from patterns for the [remote](../../sql-reference/table-functions/remote.md) function.
|
Sets the maximum number of addresses generated from patterns for the [remote](../../sql-reference/table-functions/remote.md) function.
|
||||||
@ -1710,7 +1716,7 @@ Default value: `1`
|
|||||||
|
|
||||||
## query_cache_squash_partial_results {#query-cache-squash-partial-results}
|
## query_cache_squash_partial_results {#query-cache-squash-partial-results}
|
||||||
|
|
||||||
Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressibility of cache entries (see [query_cache_compress_entries](#query_cache_compress_entries)).
|
Squash partial result blocks to blocks of size [max_block_size](#setting-max_block_size). Reduces performance of inserts into the [query cache](../query-cache.md) but improves the compressibility of cache entries (see [query_cache_compress_entries](#query-cache-compress-entries)).
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -2480,7 +2486,7 @@ See also:
|
|||||||
- [load_balancing](#load_balancing-round_robin)
|
- [load_balancing](#load_balancing-round_robin)
|
||||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||||
- [distributed_replica_error_cap](#distributed_replica_error_cap)
|
- [distributed_replica_error_cap](#distributed_replica_error_cap)
|
||||||
- [distributed_replica_error_half_life](#settings-distributed_replica_error_half_life)
|
- [distributed_replica_error_half_life](#distributed_replica_error_half_life)
|
||||||
|
|
||||||
## distributed_background_insert_sleep_time_ms {#distributed_background_insert_sleep_time_ms}
|
## distributed_background_insert_sleep_time_ms {#distributed_background_insert_sleep_time_ms}
|
||||||
|
|
||||||
@ -4158,6 +4164,41 @@ Result:
|
|||||||
└─────┴─────┴───────┘
|
└─────┴─────┴───────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## enable_order_by_all {#enable-order-by-all}
|
||||||
|
|
||||||
|
Enables or disables sorting with the `ORDER BY ALL` syntax, see [ORDER BY](../../sql-reference/statements/select/order-by.md).
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- 0 — Disable ORDER BY ALL.
|
||||||
|
- 1 — Enable ORDER BY ALL.
|
||||||
|
|
||||||
|
Default value: `1`.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE TAB(C1 Int, C2 Int, ALL Int) ENGINE=Memory();
|
||||||
|
|
||||||
|
INSERT INTO TAB VALUES (10, 20, 30), (20, 20, 10), (30, 10, 20);
|
||||||
|
|
||||||
|
SELECT * FROM TAB ORDER BY ALL; -- returns an error that ALL is ambiguous
|
||||||
|
|
||||||
|
SELECT * FROM TAB ORDER BY ALL SETTINGS enable_order_by_all = 0;
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─C1─┬─C2─┬─ALL─┐
|
||||||
|
│ 20 │ 20 │ 10 │
|
||||||
|
│ 30 │ 10 │ 20 │
|
||||||
|
│ 10 │ 20 │ 30 │
|
||||||
|
└────┴────┴─────┘
|
||||||
|
```
|
||||||
|
|
||||||
## splitby_max_substrings_includes_remaining_string {#splitby_max_substrings_includes_remaining_string}
|
## splitby_max_substrings_includes_remaining_string {#splitby_max_substrings_includes_remaining_string}
|
||||||
|
|
||||||
Controls whether function [splitBy*()](../../sql-reference/functions/splitting-merging-functions.md) with argument `max_substrings` > 0 will include the remaining string in the last element of the result array.
|
Controls whether function [splitBy*()](../../sql-reference/functions/splitting-merging-functions.md) with argument `max_substrings` > 0 will include the remaining string in the last element of the result array.
|
||||||
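A small sketch of the effect of this setting:

```sql
SET splitby_max_substrings_includes_remaining_string = 1;
SELECT splitByChar(',', 'a,b,c,d', 2);  -- ['a', 'b,c,d']: the last element keeps the rest of the string
```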
@ -4674,7 +4715,7 @@ Possible values:
|
|||||||
|
|
||||||
Default value: `false`.
|
Default value: `false`.
|
||||||
|
|
||||||
## rename_files_after_processing
|
## rename_files_after_processing {#rename_files_after_processing}
|
||||||
|
|
||||||
- **Type:** String
|
- **Type:** String
|
||||||
|
|
||||||
@ -5093,3 +5134,25 @@ When set to `true` than for all s3 requests first two attempts are made with low
|
|||||||
When set to `false`, all attempts are made with identical timeouts.
|
When set to `false`, all attempts are made with identical timeouts.
|
||||||
|
|
||||||
Default value: `true`.
|
Default value: `true`.
|
||||||
|
|
||||||
|
## max_partition_size_to_drop
|
||||||
|
|
||||||
|
Restriction on dropping partitions at query time.
|
||||||
|
|
||||||
|
Default value: 50 GB.
|
||||||
|
The value 0 means that you can drop partitions without any restrictions.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
This query setting overwrites its server setting equivalent, see [max_partition_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-partition-size-to-drop)
|
||||||
|
:::
|
||||||
|
|
||||||
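A hedged sketch, assuming a hypothetical table `visits` partitioned by month:

```sql
-- Lift the per-query restriction (size in bytes; 0 means no limit), then drop a large partition.
SET max_partition_size_to_drop = 0;
ALTER TABLE visits DROP PARTITION 202312;
```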
|
## max_table_size_to_drop
|
||||||
|
|
||||||
|
Restriction on deleting tables at query time.
|
||||||
|
|
||||||
|
Default value: 50 GB.
|
||||||
|
The value 0 means that you can delete all tables without any restrictions.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
|
||||||
|
:::
|
@ -239,6 +239,10 @@ The amount of virtual memory mapped for the pages of machine code of the server
|
|||||||
|
|
||||||
The amount of virtual memory mapped for the use of stack and for the allocated memory, in bytes. It is unspecified whether it includes the per-thread stacks and most of the allocated memory that is allocated with the 'mmap' system call. This metric exists only for completeness. We recommend using the `MemoryResident` metric for monitoring.
|
The amount of virtual memory mapped for the use of stack and for the allocated memory, in bytes. It is unspecified whether it includes the per-thread stacks and most of the allocated memory that is allocated with the 'mmap' system call. This metric exists only for completeness. We recommend using the `MemoryResident` metric for monitoring.
|
||||||
|
|
||||||
|
### MemoryResidentMax
|
||||||
|
|
||||||
|
Maximum amount of physical memory used by the server process, in bytes.
|
||||||
|
|
||||||
### MemoryResident
|
### MemoryResident
|
||||||
|
|
||||||
The amount of physical memory used by the server process, in bytes.
|
The amount of physical memory used by the server process, in bytes.
|
||||||
|
@ -78,5 +78,5 @@ is_active: NULL
|
|||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
- [Table engine Distributed](../../engines/table-engines/special/distributed.md)
|
||||||
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#settings-distributed_replica_error_cap)
|
- [distributed_replica_error_cap setting](../../operations/settings/settings.md#distributed_replica_error_cap)
|
||||||
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#settings-distributed_replica_error_half_life)
|
- [distributed_replica_error_half_life setting](../../operations/settings/settings.md#distributed_replica_error_half_life)
|
||||||
|
@ -11,7 +11,7 @@ This table does not contain the ingested data for `INSERT` queries.
|
|||||||
|
|
||||||
You can change settings of queries logging in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.
|
You can change settings of queries logging in the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) section of the server configuration.
|
||||||
|
|
||||||
You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#settings-log-queries). We do not recommend turning off logging because information in this table is important for solving issues.
|
You can disable queries logging by setting [log_queries = 0](../../operations/settings/settings.md#log-queries). We do not recommend turning off logging because information in this table is important for solving issues.
|
||||||
|
|
||||||
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query-log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
||||||
|
|
||||||
@ -30,7 +30,7 @@ Each query creates one or two rows in the `query_log` table, depending on the st
|
|||||||
|
|
||||||
You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries, registered in the `query_log` table.
|
You can use the [log_queries_probability](../../operations/settings/settings.md#log-queries-probability) setting to reduce the number of queries, registered in the `query_log` table.
|
||||||
|
|
||||||
You can use the [log_formatted_queries](../../operations/settings/settings.md#settings-log-formatted-queries) setting to log formatted queries to the `formatted_query` column.
|
You can use the [log_formatted_queries](../../operations/settings/settings.md#log-formatted-queries) setting to log formatted queries to the `formatted_query` column.
|
||||||
|
|
||||||
Columns:
|
Columns:
|
||||||
|
|
||||||
@ -101,7 +101,7 @@ Columns:
|
|||||||
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
|
- `revision` ([UInt32](../../sql-reference/data-types/int-uint.md)) — ClickHouse revision.
|
||||||
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
|
- `ProfileEvents` ([Map(String, UInt64)](../../sql-reference/data-types/map.md)) — ProfileEvents that measure different metrics. The description of them could be found in the table [system.events](../../operations/system-tables/events.md#system_tables-events)
|
||||||
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
|
- `Settings` ([Map(String, String)](../../sql-reference/data-types/map.md)) — Settings that were changed when the client ran the query. To enable logging changes to settings, set the `log_query_settings` parameter to 1.
|
||||||
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#settings-max_query_size). An empty string if it is not defined.
|
- `log_comment` ([String](../../sql-reference/data-types/string.md)) — Log comment. It can be set to arbitrary string no longer than [max_query_size](../../operations/settings/settings.md#max_query_size). An empty string if it is not defined.
|
||||||
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution. These threads may not have run simultaneously.
|
- `thread_ids` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Thread ids that are participating in query execution. These threads may not have run simultaneously.
|
||||||
- `peak_threads_usage` ([UInt64)](../../sql-reference/data-types/int-uint.md)) — Maximum count of simultaneous threads executing the query.
|
- `peak_threads_usage` ([UInt64)](../../sql-reference/data-types/int-uint.md)) — Maximum count of simultaneous threads executing the query.
|
||||||
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
|
- `used_aggregate_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `aggregate functions`, which were used during query execution.
|
||||||
|
@ -8,7 +8,7 @@ Contains information about threads that execute queries, for example, thread nam
|
|||||||
To start logging:
|
To start logging:
|
||||||
|
|
||||||
1. Configure parameters in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) section.
|
1. Configure parameters in the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) section.
|
||||||
2. Set [log_query_threads](../../operations/settings/settings.md#settings-log-query-threads) to 1.
|
2. Set [log_query_threads](../../operations/settings/settings.md#log-query-threads) to 1.
|
||||||
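A minimal sketch of both steps from the query side (step 1 is server configuration and is not shown here):

```sql
SET log_query_threads = 1;                 -- step 2: enable per-thread logging for this session
SELECT sum(number) FROM numbers(1000000);  -- any query, to generate log entries

SYSTEM FLUSH LOGS;                         -- force the log buffer to be written
SELECT thread_name, query_id
FROM system.query_thread_log
ORDER BY event_time DESC
LIMIT 5;
```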
|
|
||||||
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_thread_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_thread_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@ Contains information about the dependent views executed when running a query, fo
|
|||||||
To start logging:
|
To start logging:
|
||||||
|
|
||||||
1. Configure parameters in the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) section.
|
1. Configure parameters in the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) section.
|
||||||
2. Set [log_query_views](../../operations/settings/settings.md#settings-log-query-views) to 1.
|
2. Set [log_query_views](../../operations/settings/settings.md#log-query-views) to 1.
|
||||||
|
|
||||||
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
The flushing period of data is set in `flush_interval_milliseconds` parameter of the [query_views_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-query_views_log) server settings section. To force flushing, use the [SYSTEM FLUSH LOGS](../../sql-reference/statements/system.md#query_language-system-flush_logs) query.
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@ This table contains the following columns (the column type is shown in brackets)
|
|||||||
- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
|
- `supports_sort_order` (UInt8) — Flag that indicates if table engine supports clauses `PARTITION_BY`, `PRIMARY_KEY`, `ORDER_BY` and `SAMPLE_BY`.
|
||||||
- `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](../../engines/table-engines/mergetree-family/replication.md).
|
- `supports_replication` (UInt8) — Flag that indicates if table engine supports [data replication](../../engines/table-engines/mergetree-family/replication.md).
|
||||||
- `supports_deduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.
|
- `supports_deduplication` (UInt8) — Flag that indicates if table engine supports data deduplication.
|
||||||
- `supports_parallel_insert` (UInt8) — Flag that indicates if table engine supports parallel insert (see [`max_insert_threads`](../../operations/settings/settings.md#settings-max-insert-threads) setting).
|
- `supports_parallel_insert` (UInt8) — Flag that indicates if table engine supports parallel insert (see [`max_insert_threads`](../../operations/settings/settings.md#max-insert-threads) setting).
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@ Columns:
|
|||||||
|
|
||||||
- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies.
|
- `dependencies_database` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Database dependencies.
|
||||||
|
|
||||||
- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([MaterializedView](../../engines/table-engines/special/materializedview.md) tables based on the current table).
|
- `dependencies_table` ([Array](../../sql-reference/data-types/array.md)([String](../../sql-reference/data-types/string.md))) - Table dependencies ([materialized views](../../sql-reference/statements/create/view.md#materialized-view) based on the current table).
|
||||||
|
|
||||||
- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table.
|
- `create_table_query` ([String](../../sql-reference/data-types/string.md)) - The query that was used to create the table.
|
||||||
|
|
||||||
@@ -57,6 +57,8 @@ Columns:

 - If the table stores data on disk, returns used space on disk (i.e. compressed).
 - If the table stores data in memory, returns approximated number of used bytes in memory.

+- `total_bytes_uncompressed` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of uncompressed bytes, if it's possible to quickly determine the exact number of bytes from the part checksums for the table on storage, otherwise `NULL` (does not take underlying storage (if any) into account).
+
 - `lifetime_rows` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of rows INSERTed since server start (only for `Buffer` tables).

 - `lifetime_bytes` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) - Total number of bytes INSERTed since server start (only for `Buffer` tables).
@@ -28,7 +28,7 @@ In both cases the type of the returned value is [UInt64](../../../sql-reference/

 **Details**

-ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#settings-count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.
+ClickHouse supports the `COUNT(DISTINCT ...)` syntax. The behavior of this construction depends on the [count_distinct_implementation](../../../operations/settings/settings.md#count_distinct_implementation) setting. It defines which of the [uniq\*](../../../sql-reference/aggregate-functions/reference/uniq.md#agg_function-uniq) functions is used to perform the operation. The default is the [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md#agg_function-uniqexact) function.

 The `SELECT count() FROM table` query is optimized by default using metadata from MergeTree. If you need to use row-level security, disable optimization using the [optimize_trivial_count_query](../../../operations/settings/settings.md#optimize-trivial-count-query) setting.
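As an editorial illustration (not part of this diff; the table and column names are hypothetical), the setting referenced above can also be overridden per query:

``` sql
SELECT count(DISTINCT user_id)
FROM visits
SETTINGS count_distinct_implementation = 'uniqCombined';
```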
@@ -394,7 +394,7 @@ Configuration example:

 or

 ``` sql
-LAYOUT(HASHED_ARRAY())
+LAYOUT(HASHED_ARRAY([SHARDS 1]))
 ```

 ### complex_key_hashed_array
@@ -412,7 +412,7 @@ Configuration example:

 or

 ``` sql
-LAYOUT(COMPLEX_KEY_HASHED_ARRAY())
+LAYOUT(COMPLEX_KEY_HASHED_ARRAY([SHARDS 1]))
 ```

 ### range_hashed {#range_hashed}
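For context, a sketch of how the now-documented optional `SHARDS` clause might appear in a dictionary DDL (editorial illustration; all names and the shard count are hypothetical):

``` sql
CREATE DICTIONARY page_titles
(
    id UInt64,
    title String
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'pages'))
LIFETIME(MIN 0 MAX 3600)
LAYOUT(HASHED_ARRAY(SHARDS 16));
```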
@@ -143,7 +143,7 @@ range([start, ] end [, step])

 **Implementation details**

 - All arguments `start`, `end`, `step` must be of the data types `UInt8`, `UInt16`, `UInt32`, `UInt64`, `Int8`, `Int16`, `Int32`, `Int64`, as are the elements of the returned array, whose type is a supertype of all the arguments.
-- An exception is thrown if query results in arrays with a total length of more than number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#settings-function_range_max_elements_in_block) setting.
+- An exception is thrown if query results in arrays with a total length of more than number of elements specified by the [function_range_max_elements_in_block](../../operations/settings/settings.md#function_range_max_elements_in_block) setting.
 - Returns Null if any argument has Nullable(Nothing) type. An exception is thrown if any argument has Null value (Nullable(T) type).

 **Examples**
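For orientation, typical results of the function (editorial illustration, not taken from this diff):

``` sql
-- range(5)        -> [0,1,2,3,4]
-- range(1, 5)     -> [1,2,3,4]
-- range(1, 10, 2) -> [1,3,5,7,9]
SELECT range(5), range(1, 5), range(1, 10, 2);
```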
@@ -16,7 +16,7 @@ For proper operation of introspection functions:

 - Install the `clickhouse-common-static-dbg` package.

-- Set the [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) setting to 1.
+- Set the [allow_introspection_functions](../../operations/settings/settings.md#allow_introspection_functions) setting to 1.

 For security reasons introspection functions are disabled by default.
@@ -2831,3 +2831,92 @@ Result:

 │ SELECT a, b FROM tab WHERE (a > 3) AND (b < 3) │
 └─────────────────────────────────────────────────────────────────────────┘
 ```
+
+## minSampleSizeConversion
+
+Calculates minimum required sample size for an A/B test comparing conversions (proportions) in two samples.
+
+**Syntax**
+
+``` sql
+minSampleSizeConversion(baseline, mde, power, alpha)
+```
+
+Uses the formula described in [this article](https://towardsdatascience.com/required-sample-size-for-a-b-testing-6f6608dd330a). Assumes equal sizes of treatment and control groups. Returns the sample size required for one group (i.e. the sample size required for the whole experiment is twice the returned value).
+
+**Arguments**
+
+- `baseline` — Baseline conversion. [Float](../data-types/float.md).
+- `mde` — Minimum detectable effect (MDE) as percentage points (e.g. for a baseline conversion 0.25 the MDE 0.03 means an expected change to 0.25 ± 0.03). [Float](../data-types/float.md).
+- `power` — Required statistical power of a test (1 - probability of Type II error). [Float](../data-types/float.md).
+- `alpha` — Required significance level of a test (probability of Type I error). [Float](../data-types/float.md).
+
+**Returned value**
+
+A named [Tuple](../data-types/tuple.md) with 3 elements:
+
+- `"minimum_sample_size"` — Required sample size. [Float64](../data-types/float.md).
+- `"detect_range_lower"` — Lower bound of the range of values not detectable with the returned required sample size (i.e. all values less than or equal to `"detect_range_lower"` are detectable with the provided `alpha` and `power`). Calculated as `baseline - mde`. [Float64](../data-types/float.md).
+- `"detect_range_upper"` — Upper bound of the range of values not detectable with the returned required sample size (i.e. all values greater than or equal to `"detect_range_upper"` are detectable with the provided `alpha` and `power`). Calculated as `baseline + mde`. [Float64](../data-types/float.md).
+
+**Example**
+
+The following query calculates the required sample size for an A/B test with baseline conversion of 25%, MDE of 3%, significance level of 5%, and the desired statistical power of 80%:
+
+``` sql
+SELECT minSampleSizeConversion(0.25, 0.03, 0.80, 0.05) AS sample_size;
+```
+
+Result:
+
+``` text
+┌─sample_size───────────────────┐
+│ (3396.077603219163,0.22,0.28) │
+└───────────────────────────────┘
+```
+
+## minSampleSizeContinuous
+
+Calculates minimum required sample size for an A/B test comparing means of a continuous metric in two samples.
+
+**Syntax**
+
+``` sql
+minSampleSizeContinuous(baseline, sigma, mde, power, alpha)
+```
+
+Alias: `minSampleSizeContinous`
+
+Uses the formula described in [this article](https://towardsdatascience.com/required-sample-size-for-a-b-testing-6f6608dd330a). Assumes equal sizes of treatment and control groups. Returns the required sample size for one group (i.e. the sample size required for the whole experiment is twice the returned value). Also assumes equal variance of the test metric in treatment and control groups.
+
+**Arguments**
+
+- `baseline` — Baseline value of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md).
+- `sigma` — Baseline standard deviation of a metric. [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md).
+- `mde` — Minimum detectable effect (MDE) as percentage of the baseline value (e.g. for a baseline value 112.25 the MDE 0.03 means an expected change to 112.25 ± 112.25*0.03). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md).
+- `power` — Required statistical power of a test (1 - probability of Type II error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md).
+- `alpha` — Required significance level of a test (probability of Type I error). [Integer](../data-types/int-uint.md) or [Float](../data-types/float.md).
+
+**Returned value**
+
+A named [Tuple](../data-types/tuple.md) with 3 elements:
+
+- `"minimum_sample_size"` — Required sample size. [Float64](../data-types/float.md).
+- `"detect_range_lower"` — Lower bound of the range of values not detectable with the returned required sample size (i.e. all values less than or equal to `"detect_range_lower"` are detectable with the provided `alpha` and `power`). Calculated as `baseline * (1 - mde)`. [Float64](../data-types/float.md).
+- `"detect_range_upper"` — Upper bound of the range of values not detectable with the returned required sample size (i.e. all values greater than or equal to `"detect_range_upper"` are detectable with the provided `alpha` and `power`). Calculated as `baseline * (1 + mde)`. [Float64](../data-types/float.md).
+
+**Example**
+
+The following query calculates the required sample size for an A/B test on a metric with baseline value of 112.25, standard deviation of 21.1, MDE of 3%, significance level of 5%, and the desired statistical power of 80%:
+
+``` sql
+SELECT minSampleSizeContinous(112.25, 21.1, 0.03, 0.80, 0.05) AS sample_size;
+```
+
+Result:
+
+``` text
+┌─sample_size───────────────────────────┐
+│ (616.2931945826209,108.8825,115.6175) │
+└───────────────────────────────────────┘
+```
@@ -1383,6 +1383,71 @@ Result:

 └──────────────────┘
 ```

+## punycodeEncode
+
+Returns the [Punycode](https://en.wikipedia.org/wiki/Punycode) of a string.
+The string must be UTF8-encoded, otherwise results are undefined.
+
+**Syntax**
+
+``` sql
+punycodeEncode(val)
+```
+
+**Arguments**
+
+- `val` - Input value. [String](../data-types/string.md)
+
+**Returned value**
+
+- A Punycode representation of the input value. [String](../data-types/string.md)
+
+**Example**
+
+``` sql
+select punycodeEncode('München');
+```
+
+Result:
+
+```result
+┌─punycodeEncode('München')─┐
+│ Mnchen-3ya                │
+└───────────────────────────┘
+```
+
+## punycodeDecode
+
+Returns the UTF8-encoded plaintext of a [Punycode](https://en.wikipedia.org/wiki/Punycode)-encoded string.
+
+**Syntax**
+
+``` sql
+punycodeDecode(val)
+```
+
+**Arguments**
+
+- `val` - Punycode-encoded string. [String](../data-types/string.md)
+
+**Returned value**
+
+- The plaintext of the input value. [String](../data-types/string.md)
+
+**Example**
+
+``` sql
+select punycodeDecode('Mnchen-3ya');
+```
+
+Result:
+
+```result
+┌─punycodeDecode('Mnchen-3ya')─┐
+│ München                      │
+└──────────────────────────────┘
+```
+
 ## byteHammingDistance

 Calculates the [hamming distance](https://en.wikipedia.org/wiki/Hamming_distance) between two byte strings.
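A quick round-trip check of the two new functions (editorial illustration, not part of this diff):

``` sql
SELECT punycodeDecode(punycodeEncode('München')) AS roundtrip;  -- returns 'München'
```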
 22 docs/en/sql-reference/statements/alter/apply-deleted-mask.md Normal file
@@ -0,0 +1,22 @@
+---
+slug: /en/sql-reference/statements/alter/apply-deleted-mask
+sidebar_position: 46
+sidebar_label: APPLY DELETED MASK
+---
+
+# Apply mask of deleted rows
+
+``` sql
+ALTER TABLE [db].name [ON CLUSTER cluster] APPLY DELETED MASK [IN PARTITION partition_id]
+```
+
+The command applies the mask created by [lightweight delete](/docs/en/sql-reference/statements/delete) and forcefully removes the rows marked as deleted from disk. This command is a heavyweight mutation, and it is semantically equal to the query ```ALTER TABLE [db].name DELETE WHERE _row_exists = 0```.
+
+:::note
+It only works for tables in the [`MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) family (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) tables).
+:::
+
+**See also**
+
+- [Lightweight deletes](/docs/en/sql-reference/statements/delete)
+- [Heavyweight deletes](/docs/en/sql-reference/statements/alter/delete.md)
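For orientation, a sketch of the intended workflow (editorial illustration; the table and predicate are hypothetical):

``` sql
DELETE FROM orders WHERE status = 'cancelled';  -- lightweight delete: rows are only masked
ALTER TABLE orders APPLY DELETED MASK;          -- heavyweight mutation: masked rows are removed from disk
```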
@@ -17,6 +17,7 @@ Most `ALTER TABLE` queries modify table settings or data:

 - [CONSTRAINT](/docs/en/sql-reference/statements/alter/constraint.md)
 - [TTL](/docs/en/sql-reference/statements/alter/ttl.md)
 - [STATISTIC](/docs/en/sql-reference/statements/alter/statistic.md)
+- [APPLY DELETED MASK](/docs/en/sql-reference/statements/alter/apply-deleted-mask.md)

 :::note
 Most `ALTER TABLE` queries are supported only for [\*MergeTree](/docs/en/engines/table-engines/mergetree-family/index.md) tables, as well as [Merge](/docs/en/engines/table-engines/special/merge.md) and [Distributed](/docs/en/engines/table-engines/special/distributed.md).
@@ -11,7 +11,7 @@ Inserts data into a table.

 **Syntax**

 ``` sql
-INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
+INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] [SETTINGS ...] VALUES (v11, v12, v13), (v21, v22, v23), ...
 ```

 You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#except-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).
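To show where the new `SETTINGS` clause sits (editorial illustration; the table and the chosen setting are hypothetical):

``` sql
INSERT INTO events (id, message)
SETTINGS async_insert = 1
VALUES (1, 'hello'), (2, 'world');
```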
@@ -126,7 +126,7 @@ To insert a default value instead of `NULL` into a column with not nullable data

 **Syntax**

 ``` sql
-INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
+INSERT INTO [TABLE] [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] [SETTINGS ...] [FORMAT format_name]
 ```

 Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
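Likewise for the file-based form (editorial illustration; file name, table, and setting are hypothetical, and the statement is run from clickhouse-client):

``` sql
INSERT INTO events FROM INFILE 'data.csv.gz' COMPRESSION 'gzip'
SETTINGS input_format_allow_errors_num = 10
FORMAT CSV
```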
@@ -17,7 +17,7 @@ This query tries to initialize an unscheduled merge of data parts for tables. No

 OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE [BY expression]]
 ```

-The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family, the [MaterializedView](../../engines/table-engines/special/materializedview.md) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.
+The `OPTIMIZE` query is supported for [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) family (including [materialized views](../../sql-reference/statements/create/view.md#materialized-view)) and the [Buffer](../../engines/table-engines/special/buffer.md) engines. Other table engines aren’t supported.

 When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) family of table engines, ClickHouse creates a task for merging and waits for execution on all replicas (if the [alter_sync](../../operations/settings/settings.md#alter-sync) setting is set to `2`) or on current replica (if the [alter_sync](../../operations/settings/settings.md#alter-sync) setting is set to `1`).

@@ -34,7 +34,7 @@ Queries that use `FINAL` are executed slightly slower than similar queries that

 - Data is merged during query execution.
 - Queries with `FINAL` read primary key columns in addition to the columns specified in the query.

-**In most cases, avoid using `FINAL`.** The common approach is to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and deal with it by applying aggregation (for example, to discard duplicates).
+`FINAL` requires additional compute and memory resources, as the processing that normally would occur at merge time must occur in memory at the time of the query. However, using FINAL is sometimes necessary in order to produce accurate results, and is less expensive than running `OPTIMIZE` to force a merge. It is also sometimes possible to use different queries that assume the background processes of the `MergeTree` engine haven’t happened yet and deal with it by applying aggregation (for example, to discard duplicates). If you need to use FINAL in your queries in order to get the required results, then it is okay to do so but be aware of the additional processing required.

 `FINAL` can be applied automatically using [FINAL](../../../operations/settings/settings.md#final) setting to all tables in a query using a session or a user profile.
@@ -43,22 +43,23 @@ Additional join types available in ClickHouse:

 - `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
 - `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
 - `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
+- `PASTE JOIN`, performs a horizontal concatenation of two tables.

 :::note
-When [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) is set to `partial_merge`, `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported).
+When [join_algorithm](../../../operations/settings/settings.md#join_algorithm) is set to `partial_merge`, `RIGHT JOIN` and `FULL JOIN` are supported only with `ALL` strictness (`SEMI`, `ANTI`, `ANY`, and `ASOF` are not supported).
 :::

 ## Settings

-The default join type can be overridden using [join_default_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
+The default join type can be overridden using [join_default_strictness](../../../operations/settings/settings.md#join_default_strictness) setting.

 The behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.

 **See also**

-- [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm)
+- [join_algorithm](../../../operations/settings/settings.md#join_algorithm)
-- [join_any_take_last_row](../../../operations/settings/settings.md#settings-join_any_take_last_row)
+- [join_any_take_last_row](../../../operations/settings/settings.md#join_any_take_last_row)
 - [join_use_nulls](../../../operations/settings/settings.md#join_use_nulls)
 - [partial_merge_join_optimizations](../../../operations/settings/settings.md#partial_merge_join_optimizations)
 - [partial_merge_join_rows_in_right_blocks](../../../operations/settings/settings.md#partial_merge_join_rows_in_right_blocks)
@@ -269,6 +270,33 @@ For example, consider the following tables:

 `ASOF` join is **not** supported in the [Join](../../../engines/table-engines/special/join.md) table engine.
 :::

+## PASTE JOIN Usage
+
+The result of `PASTE JOIN` is a table that contains all columns from the left subquery followed by all columns from the right subquery.
+The rows are matched based on their positions in the original tables (the order of rows should be defined).
+If the subqueries return a different number of rows, extra rows will be cut.
+
+Example:
+```SQL
+SELECT *
+FROM
+(
+    SELECT number AS a
+    FROM numbers(2)
+) AS t1
+PASTE JOIN
+(
+    SELECT number AS a
+    FROM numbers(2)
+    ORDER BY a DESC
+) AS t2
+
+┌─a─┬─t2.a─┐
+│ 0 │    1 │
+│ 1 │    0 │
+└───┴──────┘
+```
+
 ## Distributed JOIN

 There are two ways to execute join involving distributed tables:
@@ -352,7 +380,7 @@ If you need a `JOIN` for joining with dimension tables (these are relatively sma

 ### Memory Limitations

-By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For `JOIN` algorithms description see the [join_algorithm](../../../operations/settings/settings.md#settings-join_algorithm) setting.
+By default, ClickHouse uses the [hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm. ClickHouse takes the right_table and creates a hash table for it in RAM. If `join_algorithm = 'auto'` is enabled, then after some threshold of memory consumption, ClickHouse falls back to [merge](https://en.wikipedia.org/wiki/Sort-merge_join) join algorithm. For `JOIN` algorithms description see the [join_algorithm](../../../operations/settings/settings.md#join_algorithm) setting.

 If you need to restrict `JOIN` operation memory consumption use the following settings:
@@ -5,12 +5,22 @@ sidebar_label: ORDER BY

 # ORDER BY Clause

-The `ORDER BY` clause contains a list of expressions, which can each be attributed with `DESC` (descending) or `ASC` (ascending) modifier which determine the sorting direction. If the direction is not specified, `ASC` is assumed, so it’s usually omitted. The sorting direction applies to a single expression, not to the entire list. Example: `ORDER BY Visits DESC, SearchPhrase`. Sorting is case-sensitive.
-
-If you want to sort by column numbers instead of column names, enable the setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments).
-
-Rows that have identical values for the list of sorting expressions are output in an arbitrary order, which can also be non-deterministic (different each time).
-If the ORDER BY clause is omitted, the order of the rows is also undefined, and may be non-deterministic as well.
+The `ORDER BY` clause contains
+
+- a list of expressions, e.g. `ORDER BY visits, search_phrase`,
+- a list of numbers referring to columns in the `SELECT` clause, e.g. `ORDER BY 2, 1`, or
+- `ALL` which means all columns of the `SELECT` clause, e.g. `ORDER BY ALL`.
+
+To disable sorting by column numbers, set setting [enable_positional_arguments](../../../operations/settings/settings.md#enable-positional-arguments) = 0.
+To disable sorting by `ALL`, set setting [enable_order_by_all](../../../operations/settings/settings.md#enable-order-by-all) = 0.
+
+The `ORDER BY` clause can be attributed by a `DESC` (descending) or `ASC` (ascending) modifier which determines the sorting direction.
+Unless an explicit sort order is specified, `ASC` is used by default.
+The sorting direction applies to a single expression, not to the entire list, e.g. `ORDER BY Visits DESC, SearchPhrase`.
+Also, sorting is performed case-sensitively.
+
+Rows with identical values for the sort expressions are returned in an arbitrary and non-deterministic order.
+If the `ORDER BY` clause is omitted in a `SELECT` statement, the row order is also arbitrary and non-deterministic.

 ## Sorting of Special Values
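As a quick illustration of the new `ALL` form (editorial addition; the table is hypothetical):

``` sql
SELECT a, b, c FROM t ORDER BY ALL;
-- equivalent to:
SELECT a, b, c FROM t ORDER BY a, b, c;
```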
@@ -265,8 +275,9 @@ Consider disabling `optimize_read_in_order` manually, when running queries that

 Optimization is supported in the following table engines:

-- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md)
-- [Merge](../../../engines/table-engines/special/merge.md), [Buffer](../../../engines/table-engines/special/buffer.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) table engines over `MergeTree`-engine tables
+- [MergeTree](../../../engines/table-engines/mergetree-family/mergetree.md) (including [materialized views](../../../sql-reference/statements/create/view.md#materialized-view)),
+- [Merge](../../../engines/table-engines/special/merge.md),
+- [Buffer](../../../engines/table-engines/special/buffer.md)

 In `MaterializedView`-engine tables the optimization works with views like `SELECT ... FROM merge_tree_table ORDER BY pk`. But it is not supported in the queries like `SELECT ... FROM view ORDER BY pk` if the view query does not have the `ORDER BY` clause.
@@ -16,7 +16,7 @@ INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')

 The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings-formats.md#input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#expressions).

-Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
+Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
 It allows for avoiding issues with large `INSERT` queries.

 When using the `Values` format in an `INSERT` query, it may seem that data is parsed the same as expressions in a `SELECT` query, but this is not true. The `Values` format is much more limited.
@@ -55,5 +55,5 @@ Connection settings like `host`, `port`, `user`, `password`, `compression`, `sec

 **See Also**

-- [skip_unavailable_shards](../../operations/settings/settings.md#settings-skip_unavailable_shards)
+- [skip_unavailable_shards](../../operations/settings/settings.md#skip_unavailable_shards)
-- [load_balancing](../../operations/settings/settings.md#settings-load_balancing)
+- [load_balancing](../../operations/settings/settings.md#load_balancing)
@@ -199,11 +199,11 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3

 ## Settings {#settings}

-- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
+- [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default.
 - [engine_file_truncate_on_insert](/docs/en/operations/settings/settings.md#engine-file-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
 - [engine_file_allow_create_multiple_files](/docs/en/operations/settings/settings.md#engine_file_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [engine_file_skip_empty_files](/docs/en/operations/settings/settings.md#engine_file_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
-- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-emptyif-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.
+- [storage_file_read_method](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - method of reading data from storage file, one of: read, pread, mmap (only for clickhouse-local). Default value: `pread` for clickhouse-server, `mmap` for clickhouse-local.

 **See Also**
@@ -100,7 +100,7 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin

 ## Storage Settings {#storage-settings}

-- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
 - [hdfs_create_multiple_files](/docs/en/operations/settings/settings.md#hdfs_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [hdfs_skip_empty_files](/docs/en/operations/settings/settings.md#hdfs_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
 - [ignore_access_denied_multidirectory_globs](/docs/en/operations/settings/settings.md#ignore_access_denied_multidirectory_globs) - allows to ignore permission denied errors for multi-directory globs.

@@ -165,5 +165,5 @@ The following pattern types are supported.

 - `{0n..0m}` - A range of numbers with leading zeroes. This pattern preserves leading zeroes in indices. For instance, `example{01..03}-1` generates `example01-1`, `example02-1` and `example03-1`.
 - `{a|b}` - Any number of variants separated by a `|`. The pattern specifies replicas. For instance, `example01-{1|2}` generates replicas `example01-1` and `example01-2`.

-The query will be sent to the first healthy replica. However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#settings-load_balancing) setting.
+The query will be sent to the first healthy replica. However, for `remote` the replicas are iterated in the order currently set in the [load_balancing](../../operations/settings/settings.md#load_balancing) setting.
 The number of generated addresses is limited by [table_function_remote_max_addresses](../../operations/settings/settings.md#table_function_remote_max_addresses) setting.
@@ -16,7 +16,7 @@ When using the `s3 table function` with [`INSERT INTO...SELECT`](../../sql-refer

 **Syntax**

 ``` sql
-s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
+s3(path [, NOSIGN | aws_access_key_id, aws_secret_access_key [,session_token]] [,format] [,structure] [,compression])
 ```

 :::tip GCS

@@ -38,6 +38,8 @@ For GCS, substitute your HMAC key and HMAC secret where you see `aws_access_key_
 :::

 - `NOSIGN` - If this keyword is provided in place of credentials, all the requests will not be signed.
+- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
+- `session_token` - Session token to use with the given keys. Optional when passing keys.
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
 - `compression` — Parameter is optional. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. By default, it will autodetect compression by file extension.

@@ -236,7 +238,7 @@ LIMIT 5;

 ## Storage Settings {#storage-settings}

-- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3-truncate-on-insert) - allows to truncate file before insert into it. Disabled by default.
+- [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
 - [s3_create_multiple_files](/docs/en/operations/settings/settings.md#s3_allow_create_multiple_files) - allows to create a new file on each insert if format has suffix. Disabled by default.
 - [s3_skip_empty_files](/docs/en/operations/settings/settings.md#s3_skip_empty_files) - allows to skip empty files while reading. Disabled by default.
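To show where the new `session_token` argument sits in the call (editorial illustration; the bucket, credentials, and token are placeholders):

``` sql
SELECT count(*)
FROM s3(
    'https://my-bucket.s3.amazonaws.com/data/*.csv',
    'my_access_key_id',      -- aws_access_key_id (placeholder)
    'my_secret_access_key',  -- aws_secret_access_key (placeholder)
    'my_session_token',      -- session_token (placeholder)
    'CSV'
);
```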
@@ -10,14 +10,15 @@ Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) and Google

 **Syntax**

 ``` sql
-s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure])
+s3Cluster(cluster_name, source, [,access_key_id, secret_access_key, [session_token]] [,format] [,structure])
 ```

 **Arguments**

 - `cluster_name` — Name of a cluster that is used to build a set of addresses and connection parameters to remote and local servers.
 - `source` — URL to a file or a bunch of files. Supports following wildcards in readonly mode: `*`, `**`, `?`, `{'abc','def'}` and `{N..M}` where `N`, `M` — numbers, `abc`, `def` — strings. For more information see [Wildcards In Path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
-- `access_key_id` and `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
+- `access_key_id`, `secret_access_key` — Keys that specify credentials to use with given endpoint. Optional.
+- `session_token` - Session token to use with the given keys. Optional when passing keys.
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
@@ -11,7 +11,7 @@ sidebar_label: s3

 **Syntax**

 ``` sql
-s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression])
+s3(path [,access_key_id, secret_access_key [,session_token]] [,format] [,structure] [,compression])
 ```

 **Arguments**
@@ -11,14 +11,14 @@ sidebar_label: s3Cluster

 **Syntax**

 ``` sql
-s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure])
+s3Cluster(cluster_name, source, [,access_key_id, secret_access_key [,session_token]] [,format] [,structure])
 ```

 **Arguments**

 - `cluster_name` — Name of the cluster used to build a set of addresses and connection parameters to remote and local servers.
 - `source` — URL of a file or of several files. Supports the following wildcards: `*`, `?`, `{'abc','def'}` and `{N..M}`, where `N`, `M` are numbers and `abc`, `def` are strings. For details see [Wildcards in path](../../engines/table-engines/integrations/s3.md#wildcards-in-path).
-- `access_key_id` and `secret_access_key` — Keys that specify the credentials to use with the endpoint. Optional parameters.
+- `access_key_id`, `secret_access_key` and `session_token` — Keys that specify the credentials to use with the endpoint. Optional parameters.
 - `format` — The [format](../../interfaces/formats.md#formats) of the file.
 - `structure` — Structure of the table. Format `'column1_name column1_type, column2_name column2_type, ...'`.
@@ -61,6 +61,22 @@ sidebar_label: ORDER BY

 We recommend using `COLLATE` only for the final sorting of a small number of rows, because sorting with `COLLATE` is less efficient than normal byte-wise sorting.

+## ORDER BY ALL
+
+`ORDER BY ALL` sorts all selected columns in ascending order.
+
+Example:
+
+``` sql
+SELECT a, b, c FROM t ORDER BY ALL
+```
+
+Is equivalent to:
+
+``` sql
+SELECT a, b, c FROM t ORDER BY a, b, c
+```
+
 ## Implementation details {#implementation-details}

 Less RAM is used if a small enough [LIMIT](../../../sql-reference/statements/select/limit.md) is specified in addition to `ORDER BY`. Otherwise, the amount of memory spent is proportional to the volume of data to be sorted. In distributed query processing, if [GROUP BY](../../../sql-reference/statements/select/group-by.md) is omitted, sorting is partially done on remote servers and the results are merged on the requestor server. This means that for distributed sorting the volume of data to sort can be larger than the memory of a single server.
@@ -11,7 +11,7 @@ sidebar_label: s3

 **Syntax**

 ``` sql
-s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression])
+s3(path [,access_key_id, secret_access_key [,session_token]] ,format, structure, [compression])
 ```

 **Parameters**
@@ -35,7 +35,6 @@

 #include <Common/StudentTTest.h>
 #include <Common/CurrentMetrics.h>
 #include <Common/ErrorCodes.h>
-#include <filesystem>


 /** A tool for evaluating ClickHouse performance.
@@ -24,6 +24,7 @@

 #include <TableFunctions/registerTableFunctions.h>
 #include <Storages/StorageFactory.h>
 #include <Storages/registerStorages.h>
+#include <Storages/MergeTree/MergeTreeSettings.h>
 #include <DataTypes/DataTypeFactory.h>
 #include <Formats/FormatFactory.h>
 #include <Formats/registerFormats.h>
@@ -32,6 +33,9 @@

 #pragma GCC diagnostic ignored "-Wunused-function"
 #pragma GCC diagnostic ignored "-Wmissing-declarations"

+extern const char * auto_time_zones[];
+
+
 namespace DB
 {
 namespace ErrorCodes
|
|||||||
|
|
||||||
auto all_known_storage_names = StorageFactory::instance().getAllRegisteredNames();
|
auto all_known_storage_names = StorageFactory::instance().getAllRegisteredNames();
|
||||||
auto all_known_data_type_names = DataTypeFactory::instance().getAllRegisteredNames();
|
auto all_known_data_type_names = DataTypeFactory::instance().getAllRegisteredNames();
|
||||||
|
auto all_known_settings = Settings().getAllRegisteredNames();
|
||||||
|
auto all_known_merge_tree_settings = MergeTreeSettings().getAllRegisteredNames();
|
||||||
|
|
||||||
additional_names.insert(all_known_storage_names.begin(), all_known_storage_names.end());
|
additional_names.insert(all_known_storage_names.begin(), all_known_storage_names.end());
|
||||||
additional_names.insert(all_known_data_type_names.begin(), all_known_data_type_names.end());
|
additional_names.insert(all_known_data_type_names.begin(), all_known_data_type_names.end());
|
||||||
|
additional_names.insert(all_known_settings.begin(), all_known_settings.end());
|
||||||
|
additional_names.insert(all_known_merge_tree_settings.begin(), all_known_merge_tree_settings.end());
|
||||||
|
|
||||||
|
for (auto * it = auto_time_zones; *it; ++it)
|
||||||
|
{
|
||||||
|
String time_zone_name = *it;
|
||||||
|
|
||||||
|
/// Example: Europe/Amsterdam
|
||||||
|
Strings split;
|
||||||
|
boost::split(split, time_zone_name, [](char c){ return c == '/'; });
|
||||||
|
for (const auto & word : split)
|
||||||
|
if (!word.empty())
|
||||||
|
additional_names.insert(word);
|
||||||
|
}
|
||||||
|
|
||||||
KnownIdentifierFunc is_known_identifier = [&](std::string_view name)
|
KnownIdentifierFunc is_known_identifier = [&](std::string_view name)
|
||||||
{
|
{
|
||||||
|
@@ -771,6 +771,7 @@ void LocalServer::processConfig()

     global_context->setQueryKindInitial();
     global_context->setQueryKind(query_kind);
+    global_context->setQueryParameters(query_parameters);
 }

@@ -817,6 +818,7 @@ void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & o
     std::cout << getHelpHeader() << "\n";
     std::cout << options_description.main_description.value() << "\n";
     std::cout << getHelpFooter() << "\n";
+    std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
 #endif
 }
|
|||||||
for (int arg_num = 1; arg_num < argc; ++arg_num)
|
for (int arg_num = 1; arg_num < argc; ++arg_num)
|
||||||
{
|
{
|
||||||
std::string_view arg = argv[arg_num];
|
std::string_view arg = argv[arg_num];
|
||||||
if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
|
/// Parameter arg after underline.
|
||||||
|
if (arg.starts_with("--param_"))
|
||||||
|
{
|
||||||
|
auto param_continuation = arg.substr(strlen("--param_"));
|
||||||
|
auto equal_pos = param_continuation.find_first_of('=');
|
||||||
|
|
||||||
|
if (equal_pos == std::string::npos)
|
||||||
|
{
|
||||||
|
/// param_name value
|
||||||
|
++arg_num;
|
||||||
|
if (arg_num >= argc)
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter requires value");
|
||||||
|
arg = argv[arg_num];
|
||||||
|
query_parameters.emplace(String(param_continuation), String(arg));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (equal_pos == 0)
|
||||||
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter name cannot be empty");
|
||||||
|
|
||||||
|
/// param_name=value
|
||||||
|
query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (arg == "--multiquery" && (arg_num + 1) < argc && !std::string_view(argv[arg_num + 1]).starts_with('-'))
|
||||||
{
|
{
|
||||||
/// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
|
/// Transform the abbreviated syntax '--multiquery <SQL>' into the full syntax '--multiquery -q <SQL>'
|
||||||
++arg_num;
|
++arg_num;
|
||||||
|
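The new branch mirrors the behavior of clickhouse-client: values passed as `--param_<name>=value` (or `--param_<name> value`) are collected into `query_parameters` and substituted into parametrized queries. A hedged usage sketch (editorial illustration, not from this diff):

``` sql
-- assumed invocation: clickhouse-local --param_id=42 --query "SELECT {id:UInt64} AS id"
SELECT {id:UInt64} AS id;
```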
@ -2128,10 +2128,9 @@ void Server::createServers(
|
|||||||
{
|
{
|
||||||
const Settings & settings = global_context->getSettingsRef();
|
const Settings & settings = global_context->getSettingsRef();
|
||||||
|
|
||||||
Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0);
|
|
||||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||||
http_params->setTimeout(settings.http_receive_timeout);
|
http_params->setTimeout(settings.http_receive_timeout);
|
||||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout);
|
||||||
|
|
||||||
Poco::Util::AbstractConfiguration::Keys protocols;
|
Poco::Util::AbstractConfiguration::Keys protocols;
|
||||||
config.keys("protocols", protocols);
|
config.keys("protocols", protocols);
|
||||||
@@ -2385,10 +2384,9 @@ void Server::createInterserverServers(
 {
     const Settings & settings = global_context->getSettingsRef();
 
-    Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0);
     Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
     http_params->setTimeout(settings.http_receive_timeout);
-    http_params->setKeepAliveTimeout(keep_alive_timeout);
+    http_params->setKeepAliveTimeout(global_context->getServerSettings().keep_alive_timeout);
 
     /// Now iterate over interserver_listen_hosts
     for (const auto & interserver_listen_host : interserver_listen_hosts)
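Both hunks stop parsing `keep_alive_timeout` ad hoc out of the raw configuration here and take it from the typed server settings instead. A quick way to check the effective value on a running server (a sketch; assumes the `system.server_settings` table available in recent releases):

    SELECT name, value
    FROM system.server_settings
    WHERE name = 'keep_alive_timeout';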
267  programs/server/binary.html  (new file)
@@ -0,0 +1,267 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<link rel="icon" href="data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSI1NCIgaGVpZ2h0PSI0OCIgdmlld0JveD0iMCAwIDkgOCI+PHN0eWxlPi5ve2ZpbGw6I2ZjMH0ucntmaWxsOnJlZH08L3N0eWxlPjxwYXRoIGQ9Ik0wLDcgaDEgdjEgaC0xIHoiIGNsYXNzPSJyIi8+PHBhdGggZD0iTTAsMCBoMSB2NyBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNMiwwIGgxIHY4IGgtMSB6IiBjbGFzcz0ibyIvPjxwYXRoIGQ9Ik00LDAgaDEgdjggaC0xIHoiIGNsYXNzPSJvIi8+PHBhdGggZD0iTTYsMCBoMSB2OCBoLTEgeiIgY2xhc3M9Im8iLz48cGF0aCBkPSJNOCwzLjI1IGgxIHYxLjUgaC0xIHoiIGNsYXNzPSJvIi8+PC9zdmc+">
<title>ClickHouse Binary Viewer</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.9.4/leaflet.min.css" />
<style type="text/css">
html, body, #space {
width: 100%;
height: 100%;
padding: 0;
margin: 0;
}

#space {
background: #111;
image-rendering: pixelated;
}

#error {
display: none;
position: absolute;
z-index: 1001;
bottom: max(5%, 1em);
left: 50%;
transform: translate(-50%, 0);
background: #300;
color: white;
font-family: monospace;
white-space: pre-wrap;
font-size: 16pt;
padding: 1em;
min-width: 50%;
max-width: 80%;
}

.leaflet-fade-anim .leaflet-popup {
transition: none;
font-family: monospace;
}

.leaflet-control-attribution {
font-size: 12pt;
}
</style>
</head>

<body>
<div id="space"></div>
<div id="error"></div>

<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.9.4/leaflet.min.js"></script>
<script>
let host = 'http://localhost:8123/';
let user = 'default';
let password = '';
let add_http_cors_header = true;

/// If it is hosted on server, assume that it is the address of ClickHouse.
if (location.protocol != 'file:') {
host = location.origin;
user = 'default';
add_http_cors_header = false;
}

let map = L.map('space', {
crs: L.CRS.Simple,
center: [-512, 512],
maxBounds: [[128, -128], [-1152, 1152]],
zoom: 0,
});

let cached_tiles = {};

async function render(coords, tile) {
const sql = `
WITH
bitShiftLeft(1::UInt64, 5 - {z:UInt8})::UInt64 AS zoom_factor,

number MOD 1024 AS tile_x,
number DIV 1024 AS tile_y,

(zoom_factor * (tile_x + {x:UInt16} * 1024))::UInt16 AS x,
(zoom_factor * (tile_y + {y:UInt16} * 1024))::UInt16 AS y,

mortonEncode(x, y) AS addr,

extract(demangle(addressToSymbol(addr)), '^[^<]+') AS name,
(empty(name) ? 0 : sipHash64(name)) AS hash,
hash MOD 256 AS r, hash DIV 256 MOD 256 AS g, hash DIV 65536 MOD 256 AS b

SELECT r::UInt8, g::UInt8, b::UInt8
FROM numbers_mt(1024*1024)
ORDER BY number`;

const key = `${coords.z}-${coords.x}-${coords.y}`;
let buf = cached_tiles[key];
if (!buf) {
let url = `${host}?default_format=RowBinary&allow_introspection_functions=1`;

if (add_http_cors_header) {
// For debug purposes, you may set add_http_cors_header from a browser console
url += '&add_http_cors_header=1';
}

if (user) {
url += `&user=${encodeURIComponent(user)}`;
}
if (password) {
url += `&password=${encodeURIComponent(password)}`;
}

url += `&param_z=${coords.z}&param_x=${coords.x}&param_y=${coords.y}`;
url += `&enable_http_compression=1&network_compression_method=zstd&network_zstd_compression_level=6`;

const response = await fetch(url, { method: 'POST', body: sql });

if (!response.ok) {
const text = await response.text();
let err = document.getElementById('error');
err.textContent = text;
err.style.display = 'block';
return;
}

buf = await response.arrayBuffer();
cached_tiles[key] = buf;
}

let ctx = tile.getContext('2d');
let image = ctx.createImageData(1024, 1024);
let arr = new Uint8ClampedArray(buf);

for (let i = 0; i < 1024 * 1024; ++i) {
image.data[i * 4 + 0] = arr[i * 3 + 0];
image.data[i * 4 + 1] = arr[i * 3 + 1];
image.data[i * 4 + 2] = arr[i * 3 + 2];
image.data[i * 4 + 3] = 255;
}

ctx.putImageData(image, 0, 0, 0, 0, 1024, 1024);

let err = document.getElementById('error');
err.style.display = 'none';
}

L.GridLayer.ClickHouse = L.GridLayer.extend({
createTile: function(coords, done) {
let tile = L.DomUtil.create('canvas', 'leaflet-tile');
tile.width = 1024;
tile.height = 1024;
if (coords.x < 0 || coords.y < 0 || coords.x >= Math.pow(2, coords.z) || coords.y >= Math.pow(2, coords.z)) return tile;
render(coords, tile).then(err => done(err, tile));
return tile;
}
});

let layer = new L.GridLayer.ClickHouse({
tileSize: 1024,
minZoom: 0,
maxZoom: 10,
minNativeZoom: 0,
maxNativeZoom: 5,
attribution: '© ClickHouse, Inc.'
});

layer.addTo(map);

map.attributionControl.setPrefix('<a href="https://github.com/ClickHouse/ClickHouse/">About</a>');

function latLngToPixel(latlng) {
return { x: ((latlng.lng / 1024) * 32768)|0, y: ((-latlng.lat / 1024) * 32768)|0 };
}

function pixelToLatLng(pixel) {
return { lat: (-pixel.y - 0.5) / 32768 * 1024, lng: (pixel.x + 0.5) / 32768 * 1024 };
}

let popup = L.popup({maxWidth: '100%'});
let current_requested_addr = '';

function updateHistory() {
const state = {
zoom: map.getZoom(),
center: latLngToPixel(map.getCenter()),
};

let query = `?zoom=${state.zoom}&x=${state.center.x}&y=${state.center.y}`;

if (popup.isOpen() && map.getBounds().contains(popup.getLatLng())) {
state.popup = latLngToPixel(popup.getLatLng());
query += `&px=${state.popup.x}&py=${state.popup.y}`;
}

history.replaceState(state, '', query);
}

window.onpopstate = function(event) {
const state = event.state;
if (!state) {
return;
}

map.setView(pixelToLatLng(state.center), state.zoom);

if (state.popup) {
showPopup(state.popup.x, state.popup.y);
}
};

if (window.location.search) {
const params = new URLSearchParams(window.location.search);

map.setView(pixelToLatLng({x: params.get('x')|0, y: params.get('y')|0}), params.get('zoom'));

if (params.get('px') !== null && params.get('py') !== null) {
showPopup(params.get('px')|0, params.get('py')|0);
}
}

function showPopup(x, y) {
const xn = BigInt(x);
const yn = BigInt(y);
let addr_int = 0n;
for (let bit = 0n; bit < 16n; ++bit) {
addr_int |= ((xn >> bit) & 1n) << (bit * 2n);
addr_int |= ((yn >> bit) & 1n) << (1n + bit * 2n);
}

current_requested_addr = addr_int;

const addr_hex = '0x' + addr_int.toString(16);
const response = fetch(
`http://localhost:8123/?default_format=JSON`,
{
method: 'POST',
body: `SELECT encodeXMLComponent(demangle(addressToSymbol(${addr_int}::UInt64))) AS name,
encodeXMLComponent(addressToLine(${addr_int}::UInt64)) AS line`
}).then(response => response.json().then(o => {

let name = o.rows ? o.data[0].name : 'nothing';
let line = o.rows ? o.data[0].line : '';

if (addr_int == current_requested_addr) {
popup.setContent(`<p><b>${addr_hex}</b></p><p>${name}</p><p>${line}</p>`);
}
}));

popup
.setLatLng(pixelToLatLng({x: x, y: y}))
.setContent(addr_hex)
.openOn(map);
}

map.on('click', e => {
const {x, y} = latLngToPixel(e.latlng);
if (x < 0 || x >= 32768 || y < 0 || y >= 32768) return;

showPopup(x, y);
updateHistory();
});

map.on('moveend', e => updateHistory());
</script>
</body>
</html>
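The viewer above asks ClickHouse itself to symbolize Morton-encoded addresses of the running binary. The same lookup that its popup request performs can be reproduced directly in SQL (a sketch; the coordinate pair is arbitrary and introspection functions must be enabled):

    SET allow_introspection_functions = 1;
    SELECT
        mortonEncode(200::UInt16, 100::UInt16)::UInt64 AS addr,
        demangle(addressToSymbol(addr)) AS symbol,
        addressToLine(addr) AS line;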
@@ -965,12 +965,10 @@ document.getElementById('mass-editor-textarea').addEventListener('input', e => {
 
 function legendAsTooltipPlugin({ className, style = { background: "var(--legend-background)" } } = {}) {
     let legendEl;
-    let showTop = false;
-    const showLimit = 5;
+    let multiline;
 
     function init(u, opts) {
         legendEl = u.root.querySelector(".u-legend");
 
         legendEl.classList.remove("u-inline");
         className && legendEl.classList.add(className);
 
@@ -986,18 +984,19 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
             ...style
         });
 
-        if (opts.series.length == 2) {
         const nodes = legendEl.querySelectorAll("th");
+        for (let i = 0; i < nodes.length; i++)
+            nodes[i]._order = i;
+
+        if (opts.series.length == 2) {
+            multiline = false;
             for (let i = 0; i < nodes.length; i++)
                 nodes[i].style.display = "none";
         } else {
+            multiline = true;
             legendEl.querySelector("th").remove();
             legendEl.querySelector("td").setAttribute('colspan', '2');
             legendEl.querySelector("td").style.textAlign = 'center';
         }
 
-        if (opts.series.length - 1 > showLimit) {
-            showTop = true;
             let footer = legendEl.insertRow().insertCell();
             footer.setAttribute('colspan', '2');
             footer.style.textAlign = 'center';
@@ -1024,18 +1023,20 @@ function legendAsTooltipPlugin({ className, style = { background: "var(--legend-
         left -= legendEl.clientWidth / 2;
         top -= legendEl.clientHeight / 2;
         legendEl.style.transform = "translate(" + left + "px, " + top + "px)";
-        if (showTop) {
+        if (multiline) {
             let nodes = nodeListToArray(legendEl.querySelectorAll("tr"));
             let header = nodes.shift();
             let footer = nodes.pop();
-            nodes.forEach(function (node) { node._sort_key = +node.querySelector("td").textContent; });
-            nodes.sort((a, b) => +b._sort_key - +a._sort_key);
+            let showLimit = Math.floor(u.height / 30);
+            nodes.forEach(function (node) { node._sort_key = nodes.length > showLimit ? +node.querySelector("td").textContent.replace(/,/g,'') : node._order; });
+            nodes.sort((a, b) => b._sort_key - a._sort_key);
             nodes.forEach(function (node) { node.parentNode.appendChild(node); });
             for (let i = 0; i < nodes.length; i++) {
                 nodes[i].style.display = i < showLimit ? null : "none";
-                delete nodes[i]._sort_key;
             }
             footer.parentNode.appendChild(footer);
+            footer.style.display = nodes.length > showLimit ? null : "none";
         }
     }
 
@@ -1,3 +0,0 @@
-clickhouse_import_crate(MANIFEST_PATH Cargo.toml)
-target_include_directories(_ch_rust_blake3 INTERFACE include)
-add_library(ch_rust::blake3 ALIAS _ch_rust_blake3)
@@ -1,20 +0,0 @@
-[package]
-name = "_ch_rust_blake3"
-version = "0.1.0"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-blake3 = "1.2.0"
-libc = "0.2.132"
-
-[lib]
-crate-type = ["staticlib"]
-
-[profile.release]
-debug = true
-
-[profile.release-thinlto]
-inherits = "release"
-# BLAKE3 module requires "full" LTO (not "thin") to get additional 10% performance benefit
-lto = true
@@ -1,15 +0,0 @@
-#ifndef BLAKE3_H
-#define BLAKE3_H
-
-#include <cstdint>
-
-
-extern "C" {
-
-char *blake3_apply_shim(const char *begin, uint32_t _size, uint8_t *out_char_data);
-
-void blake3_free_char_pointer(char *ptr_to_free);
-
-} // extern "C"
-
-#endif /* BLAKE3_H */
@@ -1,31 +0,0 @@
-extern crate blake3;
-extern crate libc;
-
-use std::ffi::{CString};
-use std::slice;
-use std::os::raw::c_char;
-
-#[no_mangle]
-pub unsafe extern "C" fn blake3_apply_shim(
-    begin: *const c_char,
-    size: u32,
-    out_char_data: *mut u8,
-) -> *mut c_char {
-    if begin.is_null() {
-        let err_str = CString::new("input was a null pointer").unwrap();
-        return err_str.into_raw();
-    }
-    let input_res = slice::from_raw_parts(begin as *const u8, size as usize);
-    let mut hasher = blake3::Hasher::new();
-    hasher.update(input_res);
-    let mut reader = hasher.finalize_xof();
-
-    reader.fill(std::slice::from_raw_parts_mut(out_char_data, blake3::OUT_LEN));
-    std::ptr::null_mut()
-}
-
-// Freeing memory according to docs: https://doc.rust-lang.org/std/ffi/struct.CString.html#method.into_raw
-#[no_mangle]
-pub unsafe extern "C" fn blake3_free_char_pointer(ptr_to_free: *mut c_char) {
-    std::mem::drop(CString::from_raw(ptr_to_free));
-}
@@ -99,6 +99,5 @@ function(add_rust_subdirectory src)
     VERBATIM)
 endfunction()
 
-add_rust_subdirectory (BLAKE3)
 add_rust_subdirectory (skim)
 add_rust_subdirectory (prql)
683  rust/Cargo.lock  (generated)
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
 # workspace is required to vendor crates for all packages.
 [workspace]
 members = [
-    "BLAKE3",
     "skim",
     "prql",
 ]
@@ -51,10 +51,10 @@ void AggregateFunctionFactory::registerFunction(const String & name, Value creat
 void AggregateFunctionFactory::registerNullsActionTransformation(const String & source_ignores_nulls, const String & target_respect_nulls)
 {
     if (!aggregate_functions.contains(source_ignores_nulls))
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Source aggregation '{}' not found");
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Source aggregation '{}' not found", source_ignores_nulls);
 
     if (!aggregate_functions.contains(target_respect_nulls))
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Target aggregation '{}' not found");
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "registerNullsActionTransformation: Target aggregation '{}' not found", target_respect_nulls);
 
     if (!respect_nulls.emplace(source_ignores_nulls, target_respect_nulls).second)
         throw Exception(
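For reference, `registerNullsActionTransformation` backs the `RESPECT NULLS` / `IGNORE NULLS` modifiers by mapping an aggregate to its nulls-respecting counterpart; the hunk above only makes the two error messages actually interpolate the offending function names. An illustrative query, assuming the modifier is available for the chosen function in the build at hand:

    SELECT any(nullIf(number, 0)) RESPECT NULLS FROM numbers(3);  -- keeps the leading NULL instead of skipping it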
@@ -197,7 +197,7 @@ public:
     virtual void insertMergeResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena * arena) const
     {
         if (isState())
-            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} is marked as State but method insertMergeResultInto is not implemented");
+            throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} is marked as State but method insertMergeResultInto is not implemented", getName());
 
         insertResultInto(place, to, arena);
     }
@@ -536,7 +536,8 @@ CNF CNF::toCNF(const QueryTreeNodePtr & node, ContextPtr context, size_t max_gro
     if (!cnf)
         throw Exception(ErrorCodes::TOO_MANY_TEMPORARY_COLUMNS,
             "Cannot convert expression '{}' to CNF, because it produces to many clauses."
-            "Size of boolean formula in CNF can be exponential of size of source formula.");
+            "Size of boolean formula in CNF can be exponential of size of source formula.",
+            node->formatConvertedASTForErrorMessage());
 
     return *cnf;
 }
@@ -4,6 +4,7 @@
 #include <Analyzer/FunctionNode.h>
 #include <Analyzer/IQueryTreeNode.h>
 #include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/Utils.h>
 
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeEnum.h>
@@ -41,22 +42,6 @@ DataTypePtr getEnumType(const std::set<std::string> & string_values)
         return getDataEnumType<DataTypeEnum8>(string_values);
 }
 
-QueryTreeNodePtr createCastFunction(QueryTreeNodePtr from, DataTypePtr result_type, ContextPtr context)
-{
-    auto enum_literal = std::make_shared<ConstantValue>(result_type->getName(), std::make_shared<DataTypeString>());
-    auto enum_literal_node = std::make_shared<ConstantNode>(std::move(enum_literal));
-
-    auto cast_function = FunctionFactory::instance().get("_CAST", std::move(context));
-    QueryTreeNodes arguments{ std::move(from), std::move(enum_literal_node) };
-
-    auto function_node = std::make_shared<FunctionNode>("_CAST");
-    function_node->getArguments().getNodes() = std::move(arguments);
-
-    function_node->resolveAsFunction(cast_function->build(function_node->getArgumentColumns()));
-
-    return function_node;
-}
-
 /// if(arg1, arg2, arg3) will be transformed to if(arg1, _CAST(arg2, Enum...), _CAST(arg3, Enum...))
 /// where Enum is generated based on the possible values stored in string_values
 void changeIfArguments(
@@ -9,6 +9,8 @@
 #include <Analyzer/HashUtils.h>
 #include <Analyzer/Utils.h>
 
+#include <DataTypes/DataTypeLowCardinality.h>
+
 namespace DB
 {
 
@@ -323,8 +325,21 @@ private:
             /// Because we reduce the number of operands here by eliminating the same equality checks,
             /// the only situation we can end up here is we had AND check where all the equality checks are the same so we know the type is UInt8.
             /// Otherwise, we will have > 1 operands and we don't have to do anything.
-            assert(!function_node.getResultType()->isNullable() && and_operands[0]->getResultType()->equals(*function_node.getResultType()));
+            auto operand_type = and_operands[0]->getResultType();
+            auto function_type = function_node.getResultType();
+            assert(!function_type->isNullable());
+            if (!function_type->equals(*operand_type))
+            {
+                /// Result of equality operator can be low cardinality, while AND always returns UInt8.
+                /// In that case we replace `(lc = 1) AND (lc = 1)` with `(lc = 1) AS UInt8`
+                assert(function_type->equals(*removeLowCardinality(operand_type)));
+                node = createCastFunction(std::move(and_operands[0]), function_type, getContext());
+            }
+            else
+            {
                 node = std::move(and_operands[0]);
+            }
             return;
         }
 
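The branch added above covers the case where the single remaining conjunct is a comparison on a LowCardinality column: its result type is LowCardinality(UInt8), while the original AND was plain UInt8, so a `_CAST` is inserted to keep the types consistent. A rough illustration of the affected query shape, using a hypothetical table and column:

    CREATE TABLE t (lc LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple();
    -- Both conjuncts are identical, so the optimizer collapses them to one comparison
    -- and, after this change, casts the LowCardinality(UInt8) result back to UInt8.
    SELECT count() FROM t WHERE (lc = 'x') AND (lc = 'x');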
@@ -389,11 +404,14 @@ private:
                 continue;
             }
 
+            bool is_any_nullable = false;
             Tuple args;
             args.reserve(equals_functions.size());
             /// first we create tuple from RHS of equals functions
             for (const auto & equals : equals_functions)
             {
+                is_any_nullable |= equals->getResultType()->isNullable();
+
                 const auto * equals_function = equals->as<FunctionNode>();
                 assert(equals_function && equals_function->getFunctionName() == "equals");
 
@@ -421,9 +439,21 @@ private:
 
             in_function->getArguments().getNodes() = std::move(in_arguments);
             in_function->resolveAsFunction(in_function_resolver);
+            /** For `k :: UInt8`, expression `k = 1 OR k = NULL` with result type Nullable(UInt8)
+              * is replaced with `k IN (1, NULL)` with result type UInt8.
+              * Convert it back to Nullable(UInt8).
+              */
+            if (is_any_nullable && !in_function->getResultType()->isNullable())
+            {
+                auto nullable_result_type = std::make_shared<DataTypeNullable>(in_function->getResultType());
+                auto in_function_nullable = createCastFunction(std::move(in_function), std::move(nullable_result_type), getContext());
+                or_operands.push_back(std::move(in_function_nullable));
+            }
+            else
+            {
                 or_operands.push_back(std::move(in_function));
             }
+        }
 
         if (or_operands.size() == function_node.getArguments().getNodes().size())
             return;
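This is exactly the situation spelled out in the new comment: the chain-of-equalities-to-IN rewrite would otherwise change the expression's type. A rough SQL illustration, with a hypothetical table `t` and UInt8 column `k`:

    -- `k = 1 OR k = NULL` has type Nullable(UInt8); the rewritten `k IN (1, NULL)` is plain UInt8,
    -- so the result is now wrapped in _CAST(..., 'Nullable(UInt8)') to preserve the original type.
    SELECT count() FROM t WHERE k = 1 OR k = NULL;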
@@ -119,6 +119,7 @@ namespace ErrorCodes
     extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH;
     extern const int FUNCTION_CANNOT_HAVE_PARAMETERS;
     extern const int SYNTAX_ERROR;
+    extern const int UNEXPECTED_EXPRESSION;
 }
 
 /** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h first.
@@ -1209,6 +1210,8 @@ private:
 
     static void expandGroupByAll(QueryNode & query_tree_node_typed);
 
+    static void expandOrderByAll(QueryNode & query_tree_node_typed);
+
     static std::string
     rewriteAggregateFunctionNameIfNeeded(const std::string & aggregate_function_name, NullsAction action, const ContextPtr & context);
 
@@ -2312,6 +2315,35 @@ void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
         recursivelyCollectMaxOrdinaryExpressions(node, group_by_nodes);
 }
 
+void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed)
+{
+    auto * all_node = query_tree_node_typed.getOrderBy().getNodes()[0]->as<SortNode>();
+    if (!all_node)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Select analyze for not sort node.");
+
+    auto & projection_nodes = query_tree_node_typed.getProjection().getNodes();
+    auto list_node = std::make_shared<ListNode>();
+    list_node->getNodes().reserve(projection_nodes.size());
+
+    for (auto & node : projection_nodes)
+    {
+        if (auto * identifier_node = node->as<IdentifierNode>(); identifier_node != nullptr)
+            if (Poco::toUpper(identifier_node->getIdentifier().getFullName()) == "ALL" || Poco::toUpper(identifier_node->getAlias()) == "ALL")
+                throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
+                    "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
+
+        if (auto * function_node = node->as<FunctionNode>(); function_node != nullptr)
+            if (Poco::toUpper(function_node->getAlias()) == "ALL")
+                throw Exception(ErrorCodes::UNEXPECTED_EXPRESSION,
+                    "Cannot use ORDER BY ALL to sort a column with name 'all', please disable setting `enable_order_by_all` and try again");
+
+        auto sort_node = std::make_shared<SortNode>(node, all_node->getSortDirection(), all_node->getNullsSortDirection());
+        list_node->getNodes().push_back(sort_node);
+    }
+
+    query_tree_node_typed.getOrderByNode() = list_node;
+}
+
 std::string QueryAnalyzer::rewriteAggregateFunctionNameIfNeeded(
     const std::string & aggregate_function_name, NullsAction action, const ContextPtr & context)
 {
@@ -6975,6 +7007,9 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
     if (query_node_typed.hasHaving() && query_node_typed.isGroupByWithTotals() && is_rollup_or_cube)
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS and WITH ROLLUP or CUBE are not supported together in presence of HAVING");
 
+    if (settings.enable_order_by_all && query_node_typed.isOrderByAll())
+        expandOrderByAll(query_node_typed);
+
     /// Initialize aliases in query node scope
     QueryExpressionsAliasVisitor visitor(scope);
 
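Taken together, these analyzer hunks expand `ORDER BY ALL` (gated by the `enable_order_by_all` setting) into one sort key per projection expression, reusing the direction given on `ALL`, and refuse the expansion if a projection column is literally named `all`. Illustrative queries against a hypothetical table `hits`:

    -- Resolved as: ORDER BY event_date DESC, c DESC
    SELECT event_date, count() AS c FROM hits GROUP BY event_date ORDER BY ALL DESC;

    -- Raises UNEXPECTED_EXPRESSION, because a projection column is named `all`:
    SELECT 1 AS all ORDER BY ALL;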
@@ -219,6 +219,18 @@ public:
         is_group_by_all = is_group_by_all_value;
     }
 
+    /// Returns true, if query node has ORDER BY ALL modifier, false otherwise
+    bool isOrderByAll() const
+    {
+        return is_order_by_all;
+    }
+
+    /// Set query node ORDER BY ALL modifier value
+    void setIsOrderByAll(bool is_order_by_all_value)
+    {
+        is_order_by_all = is_order_by_all_value;
+    }
+
     /// Returns true if query node WITH section is not empty, false otherwise
     bool hasWith() const
     {
@@ -590,6 +602,7 @@ private:
     bool is_group_by_with_cube = false;
     bool is_group_by_with_grouping_sets = false;
     bool is_group_by_all = false;
+    bool is_order_by_all = false;
 
     std::string cte_name;
     NamesAndTypes projection_columns;
@@ -284,6 +284,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSelectExpression(const ASTPtr & select_q
     current_query_tree->setIsGroupByWithRollup(select_query_typed.group_by_with_rollup);
     current_query_tree->setIsGroupByWithGroupingSets(select_query_typed.group_by_with_grouping_sets);
     current_query_tree->setIsGroupByAll(select_query_typed.group_by_all);
+    current_query_tree->setIsOrderByAll(select_query_typed.order_by_all);
     current_query_tree->setOriginalAST(select_query);
 
     auto current_context = current_query_tree->getContext();
@@ -667,4 +667,20 @@ NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node)
     return out;
 }
 
+QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context)
+{
+    auto enum_literal = std::make_shared<ConstantValue>(result_type->getName(), std::make_shared<DataTypeString>());
+    auto enum_literal_node = std::make_shared<ConstantNode>(std::move(enum_literal));
+
+    auto cast_function = FunctionFactory::instance().get("_CAST", std::move(context));
+    QueryTreeNodes arguments{ std::move(node), std::move(enum_literal_node) };
+
+    auto function_node = std::make_shared<FunctionNode>("_CAST");
+    function_node->getArguments().getNodes() = std::move(arguments);
+
+    function_node->resolveAsFunction(cast_function->build(function_node->getArgumentColumns()));
+
+    return function_node;
+}
+
 }
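The helper moved here builds the same node that a SQL-level cast with a string-literal type would: the internal `_CAST(expr, 'TypeName')` function is the engine's twin of the ordinary cast. In plain SQL terms (illustrative only):

    SELECT CAST(1, 'Nullable(UInt8)') AS x, toTypeName(x);  -- what a createCastFunction(node, Nullable(UInt8), ...) node evaluates to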
@@ -99,4 +99,7 @@ void rerunFunctionResolve(FunctionNode * function_node, ContextPtr context);
 /// Just collect all identifiers from query tree
 NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node);
 
+/// Wrap node into `_CAST` function
+QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context);
+
 }
@@ -88,15 +88,16 @@ BackupEntriesCollector::BackupEntriesCollector(
     , read_settings(read_settings_)
     , context(context_)
     , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000))
-    , collect_metadata_timeout(context->getConfigRef().getUInt64("backups.collect_metadata_timeout", context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000)))
+    , collect_metadata_timeout(context->getConfigRef().getUInt64(
+          "backups.collect_metadata_timeout", context->getConfigRef().getUInt64("backups.consistent_metadata_snapshot_timeout", 600000)))
     , attempts_to_collect_metadata_before_sleep(context->getConfigRef().getUInt("backups.attempts_to_collect_metadata_before_sleep", 2))
-    , min_sleep_before_next_attempt_to_collect_metadata(context->getConfigRef().getUInt64("backups.min_sleep_before_next_attempt_to_collect_metadata", 100))
-    , max_sleep_before_next_attempt_to_collect_metadata(context->getConfigRef().getUInt64("backups.max_sleep_before_next_attempt_to_collect_metadata", 5000))
+    , min_sleep_before_next_attempt_to_collect_metadata(
+          context->getConfigRef().getUInt64("backups.min_sleep_before_next_attempt_to_collect_metadata", 100))
+    , max_sleep_before_next_attempt_to_collect_metadata(
+          context->getConfigRef().getUInt64("backups.max_sleep_before_next_attempt_to_collect_metadata", 5000))
     , compare_collected_metadata(context->getConfigRef().getBool("backups.compare_collected_metadata", true))
     , log(&Poco::Logger::get("BackupEntriesCollector"))
     , global_zookeeper_retries_info(
-          "BackupEntriesCollector",
-          log,
           context->getSettingsRef().backup_restore_keeper_max_retries,
           context->getSettingsRef().backup_restore_keeper_retry_initial_backoff_ms,
           context->getSettingsRef().backup_restore_keeper_retry_max_backoff_ms)
@@ -572,7 +573,7 @@ std::vector<std::pair<ASTPtr, StoragePtr>> BackupEntriesCollector::findTablesInD
     {
         /// Database or table could be replicated - so may use ZooKeeper. We need to retry.
         auto zookeeper_retries_info = global_zookeeper_retries_info;
-        ZooKeeperRetriesControl retries_ctl("getTablesForBackup", zookeeper_retries_info, nullptr);
+        ZooKeeperRetriesControl retries_ctl("getTablesForBackup", log, zookeeper_retries_info, nullptr);
         retries_ctl.retryLoop([&](){ db_tables = database->getTablesForBackup(filter_by_table_name, context); });
     }
     catch (Exception & e)
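The collector's ZooKeeper retry parameters above come from query-level settings, so their effective values can be inspected like any other setting (a sketch):

    SELECT name, value
    FROM system.settings
    WHERE name LIKE 'backup_restore_keeper%';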
Some files were not shown because too many files have changed in this diff