Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-28 02:21:59 +00:00)

Compare commits: 45 commits, ce3d4203c8 ... 715d7ac31b
Commits in this comparison:
715d7ac31b, de13b819f0, 558f639f2a, d8de6d1dce, 559ea73045, e192d6c558, 16f3447ef9, 208e96f360, 8de4ecf513, 9602e338ad, f3cd9d4c72, 904be0a2e8, 8baa1bea43, cb582abbb5, 52391a8271, c30f8aecd5, 559c4c0b17, 6c7cde5702, 34bf8d3b2c, ff6f10843f, 4ac9aa255b, 0ca97d3036, 26de447039, 844513b1d0, 93dc65ad88, c3a32cc215, ebcce35e8c, 0d65fbf0e2, ed45abbc1a, e5cfc7daee, 51356a4e56, 7baaf24fc3, f6610790a8, 27fb90bb58, b8c70bf059, cdac4f600f, f878c551c7, eab454f3b9, aa58fde1d7, 33426ac58a, ac2140d3af, 66de8310c3, c0fa7bca61, d8230416d1, 6065de3073
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)

@@ -12,7 +12,7 @@
 - Backward Incompatible Change
 - Build/Testing/Packaging Improvement
 - Documentation (changelog entry is not required)
-- Critical Bug Fix (crash, data loss, RBAC)
+- Critical Bug Fix (crash, data loss, RBAC) or LOGICAL_ERROR
 - Bug Fix (user-visible misbehavior in an official stable release)
 - CI Fix or Improvement (changelog entry is not required)
 - Not for changelog (changelog entry is not required)
@@ -13,7 +13,7 @@ execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
 message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")

 # Require minimum compiler versions
-set (CLANG_MINIMUM_VERSION 17)
+set (CLANG_MINIMUM_VERSION 18)
 set (XCODE_MINIMUM_VERSION 12.0)
 set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
contrib/SimSIMD (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3
+Subproject commit da2d38537299ade247c2499131d936fb8db38f03
@@ -1,5 +1,5 @@
-# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86
-if (ARCH_AMD64)
+# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86 and ARM
+if (ARCH_AMD64 OR (ARCH_AARCH64 AND NOT NO_ARMV81_OR_HIGHER AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19))
     set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
     set(SIMSIMD_SRCS ${SIMSIMD_PROJECT_DIR}/c/lib.c)
     add_library(_simsimd ${SIMSIMD_SRCS})
contrib/usearch (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 7efe8b710c9831bfe06573b1df0fad001b04a2b5
+Subproject commit 9561fcae1249ea8effbf71250e8a7a7ea97e5dfe
@@ -6,9 +6,8 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/incl
 target_link_libraries(_usearch INTERFACE _fp16)
 target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)

-# Only x86 for now. On ARM, the linker goes down in flames. To make SimSIMD compile, I had to remove a macro checks in SimSIMD
-# for AVX512 (x86, worked nicely) and __ARM_BF16_FORMAT_ALTERNATIVE. It is probably because of that.
-if (ARCH_AMD64)
+# SimSIMD supports x86 and ARM platforms. The latter requires Clang 19 because older versions had a buggy bf16 implementation.
+if (ARCH_AMD64 OR (ARCH_AARCH64 AND NOT NO_ARMV81_OR_HIGHER AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19))
     target_link_libraries(_usearch INTERFACE _simsimd)
     target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)

@@ -17,52 +16,3 @@
 endif ()

 add_library(ch_contrib::usearch ALIAS _usearch)
-
-
-# Cf. https://github.com/llvm/llvm-project/issues/107810 (though it is not 100% the same stack)
-#
-# LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
-# 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
-# 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
-# 0x7996e7a1ae10: i64 = Register %4
-# 0x7996e7a1b5f0: i64 = Constant<-16>
-# 0x7996e7a1a730: i64 = undef
-# In function: _ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd
-# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
-# Stack dump:
-# 0. Running pass 'Function Pass Manager' on module 'src/libdbms.a(MergeTreeIndexVectorSimilarity.cpp.o at 2312737440)'.
-# 1. Running pass 'AArch64 Instruction Selection' on function '@_ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd'
-# #0 0x00007999e83a63bf llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda63bf)
-# #1 0x00007999e83a44f9 llvm::sys::RunSignalHandlers() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda44f9)
-# #2 0x00007999e83a6b00 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda6b00)
-# #3 0x00007999e6e45320 (/lib/x86_64-linux-gnu/libc.so.6+0x45320)
-# #4 0x00007999e6e9eb1c pthread_kill (/lib/x86_64-linux-gnu/libc.so.6+0x9eb1c)
-# #5 0x00007999e6e4526e raise (/lib/x86_64-linux-gnu/libc.so.6+0x4526e)
-# #6 0x00007999e6e288ff abort (/lib/x86_64-linux-gnu/libc.so.6+0x288ff)
-# #7 0x00007999e82fe0c2 llvm::report_fatal_error(llvm::Twine const&, bool) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcfe0c2)
-# #8 0x00007999e8c2f8e3 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162f8e3)
-# #9 0x00007999e8c2ed76 llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162ed76)
-# #10 0x00007999ea1adbcb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x2badbcb)
-# #11 0x00007999e8c2611f llvm::SelectionDAGISel::DoInstructionSelection() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162611f)
-# #12 0x00007999e8c25790 llvm::SelectionDAGISel::CodeGenAndEmitDAG() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1625790)
-# #13 0x00007999e8c248de llvm::SelectionDAGISel::SelectAllBasicBlocks(llvm::Function const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x16248de)
-# #14 0x00007999e8c22934 llvm::SelectionDAGISel::runOnMachineFunction(llvm::MachineFunction&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1622934)
-# #15 0x00007999e87826b9 llvm::MachineFunctionPass::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x11826b9)
-# #16 0x00007999e84f7772 llvm::FPPassManager::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7772)
-# #17 0x00007999e84fd2f4 llvm::FPPassManager::runOnModule(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xefd2f4)
-# #18 0x00007999e84f7e9f llvm::legacy::PassManagerImpl::run(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7e9f)
-# #19 0x00007999e99f7d61 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f7d61)
-# #20 0x00007999e99f8c91 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8c91)
-# #21 0x00007999e99f8b10 llvm::lto::thinBackend(llvm::lto::Config const&, unsigned int, std::function<llvm::Expected<std::unique_ptr<llvm::CachedFileStream, std::default_delete<llvm::CachedFileStream>>> (unsigned int, llvm::Twine const&)>, llvm::Module&, llvm::ModuleSummaryIndex const&, llvm::DenseMap<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>, llvm::DenseMapInfo<llvm::StringRef, void
-# >, llvm::detail::DenseMapPair<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>>> const&, llvm::DenseMap<unsigned long, llvm::GlobalValueSummary*, llvm::DenseMapInfo<unsigned long, void>, llvm::detail::DenseMapPair<unsigned long, llvm::GlobalValueSummary*>> const&, llvm::MapVector<llvm::StringRef, llvm::BitcodeModule, llvm::DenseMap<llvm::StringRef, unsigned int, llvm::DenseMapInfo<llvm::S
-# tringRef, void>, llvm::detail::DenseMapPair<llvm::StringRef, unsigned int>>, llvm::SmallVector<std::pair<llvm::StringRef, llvm::BitcodeModule>, 0u>>*, std::vector<unsigned char, std::allocator<unsigned char>> const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8b10)
-# #22 0x00007999e99f248d (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f248d)
-# #23 0x00007999e99f1cd6 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f1cd6)
-# #24 0x00007999e82c9beb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcc9beb)
-# #25 0x00007999e834ebe3 llvm::ThreadPool::processTasks(llvm::ThreadPoolTaskGroup*) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4ebe3)
-# #26 0x00007999e834f704 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4f704)
-# #27 0x00007999e6e9ca94 (/lib/x86_64-linux-gnu/libc.so.6+0x9ca94)
-# #28 0x00007999e6f29c3c (/lib/x86_64-linux-gnu/libc.so.6+0x129c3c)
-# clang++-18: error: unable to execute command: Aborted (core dumped)
-# clang++-18: error: linker command failed due to signal (use -v to see invocation)
-# ^[[A^Cninja: build stopped: interrupted by user.
@@ -131,8 +131,8 @@ For the query to run successfully, the following conditions must be met:

 - Both tables must have the same structure.
 - Both tables must have the same partition key, the same order by key and the same primary key.
-- Both tables must have the same indices and projections.
 - Both tables must have the same storage policy.
+- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.

 ## REPLACE PARTITION
@@ -151,8 +151,8 @@ For the query to run successfully, the following conditions must be met:

 - Both tables must have the same structure.
 - Both tables must have the same partition key, the same order by key and the same primary key.
-- Both tables must have the same indices and projections.
 - Both tables must have the same storage policy.
+- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.

 ## MOVE PARTITION TO TABLE
@@ -166,9 +166,9 @@ For the query to run successfully, the following conditions must be met:

 - Both tables must have the same structure.
 - Both tables must have the same partition key, the same order by key and the same primary key.
-- Both tables must have the same indices and projections.
 - Both tables must have the same storage policy.
 - Both tables must be the same engine family (replicated or non-replicated).
+- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.

 ## CLEAR COLUMN IN PARTITION
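The relaxed rule introduced above can be illustrated with a short SQL sketch (table and index names are hypothetical, not taken from this changeset): a destination whose indices form a superset of the source's is accepted by default, and rejected only in strict mode.

-- Hypothetical tables: dst carries one extra index over src.
CREATE TABLE src (a UInt32, b String, INDEX bf b TYPE bloom_filter GRANULARITY 1)
ENGINE = MergeTree ORDER BY a;

CREATE TABLE dst
(
    a UInt32,
    b String,
    INDEX bf b TYPE bloom_filter GRANULARITY 1,
    INDEX mm a TYPE minmax GRANULARITY 1
)
ENGINE = MergeTree ORDER BY a;

-- Succeeds with the relaxed default; fails if
-- enforce_index_structure_match_on_partition_manipulation is enabled on dst.
ALTER TABLE dst ATTACH PARTITION tuple() FROM src;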
@@ -136,7 +136,7 @@ ClickHouse applies the setting in those cases when
 - 0 — disabled.
 - 1 — enabled.

-Default value: 1.
+Default value: 0.

 ## http_zlib_compression_level {#settings-http_zlib_compression_level}
@@ -97,7 +97,7 @@ ClickHouse selects the most relevant replica from the table's stale replicas.
 - 0 — Disabled.
 - 1 — Enabled.

-Default value: 1.
+Default value: 0.

 ## http_zlib_compression_level {#settings-http_zlib_compression_level}
@@ -319,6 +319,8 @@ public:
         variant_column_ptr = assert_cast<ColumnVariant *>(variant_column.get());
     }

+    void forEachSubcolumn(ColumnCallback callback) const override { callback(variant_column); }
+
     void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
     {
         callback(*variant_column);
@@ -41,7 +41,7 @@ static struct InitFiu
     REGULAR(use_delayed_remote_source) \
     REGULAR(cluster_discovery_faults) \
     REGULAR(replicated_sends_failpoint) \
-    REGULAR(stripe_log_sink_write_fallpoint)\
+    REGULAR(stripe_log_sink_write_fallpoint) \
     ONCE(smt_commit_merge_mutate_zk_fail_after_op) \
     ONCE(smt_commit_merge_mutate_zk_fail_before_op) \
     ONCE(smt_commit_write_zk_fail_after_op) \

@@ -77,6 +77,8 @@ static struct InitFiu
     REGULAR(replicated_merge_tree_all_replicas_stale) \
     REGULAR(zero_copy_lock_zk_fail_before_op) \
     REGULAR(zero_copy_lock_zk_fail_after_op) \
+    REGULAR(plain_object_storage_write_fail_on_directory_create) \
+    REGULAR(plain_object_storage_write_fail_on_directory_move) \

 namespace FailPoints
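The two failpoints added above are REGULAR failpoints, so they stay active until disabled. A hedged sketch of toggling one at runtime (this relies on the standard SYSTEM FAILPOINT commands, which are only effective in builds with failpoints compiled in):

SYSTEM ENABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;
-- a plain-object-storage directory creation should now throw FAULT_INJECTED
SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;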
@@ -1800,7 +1800,7 @@ Possible values:

 - 0 — Disabled.
 - 1 — Enabled.
-)", 1) \
+)", 0) \
     DECLARE(Int64, http_zlib_compression_level, 3, R"(
 Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression).

@@ -5782,7 +5782,7 @@ Allow JSON data type
     DECLARE(Bool, allow_experimental_codecs, false, R"(
 If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).
 )", EXPERIMENTAL) \
-    DECLARE(Bool, allow_experimental_shared_set_join, true, R"(
+    DECLARE(Bool, allow_experimental_shared_set_join, false, R"(
 Only in ClickHouse Cloud. Allow to create ShareSet and SharedJoin
 )", EXPERIMENTAL) \
     DECLARE(UInt64, max_limit_for_ann_queries, 1'000'000, R"(
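Both default flips above remain overridable per session; a quick way to verify them:

SELECT name, value, changed
FROM system.settings
WHERE name IN ('enable_http_compression', 'allow_experimental_shared_set_join');

SET enable_http_compression = 1; -- opt back in for the current session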
@@ -64,7 +64,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
     },
     {"24.11",
         {
-            {"enable_http_compression", false, true, "Improvement for read-only clients since they can't change settings"},
             {"validate_mutation_query", false, true, "New setting to validate mutation queries by default."},
             {"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
             {"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},

@@ -80,6 +79,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
             {"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
             {"query_plan_merge_filters", false, true, "Allow to merge filters in the query plan. This is required to properly support filter-push-down with a new analyzer."},
             {"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
+            {"allow_experimental_shared_set_join", 1, 0, "Disable a setting for ClickHouse Cloud"},
             {"merge_tree_use_v1_object_and_dynamic_serialization", true, false, "Add new serialization V2 version for JSON and Dynamic types"},
             {"min_joined_block_size_bytes", 524288, 524288, "New setting."},
             {"allow_experimental_bfloat16_type", false, false, "Add new experimental BFloat16 type"},
|
|||||||
{
|
{
|
||||||
{"24.12",
|
{"24.12",
|
||||||
{
|
{
|
||||||
|
{"enforce_index_structure_match_on_partition_manipulation", true, false, "Add new setting to allow attach when source table's projections and secondary indices is a subset of those in the target table."}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{"24.11",
|
{"24.11",
|
||||||
|
@@ -57,6 +57,12 @@ struct InMemoryDirectoryPathMap
         return it->second;
     }

+    bool removePathIfExists(const std::filesystem::path & path)
+    {
+        std::lock_guard lock(mutex);
+        return map.erase(path) != 0;
+    }
+
     mutable SharedMutex mutex;

 #ifdef OS_LINUX
@@ -7,6 +7,7 @@
 #include <IO/WriteHelpers.h>
 #include <Poco/Timestamp.h>
 #include <Common/Exception.h>
+#include <Common/FailPoint.h>
 #include <Common/SharedLockGuard.h>
 #include <Common/logger_useful.h>
|
|||||||
extern const int FILE_DOESNT_EXIST;
|
extern const int FILE_DOESNT_EXIST;
|
||||||
extern const int FILE_ALREADY_EXISTS;
|
extern const int FILE_ALREADY_EXISTS;
|
||||||
extern const int INCORRECT_DATA;
|
extern const int INCORRECT_DATA;
|
||||||
|
extern const int FAULT_INJECTED;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
namespace FailPoints
|
||||||
|
{
|
||||||
|
extern const char plain_object_storage_write_fail_on_directory_create[];
|
||||||
|
extern const char plain_object_storage_write_fail_on_directory_move[];
|
||||||
|
}
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -72,8 +80,14 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std:
|
|||||||
/* buf_size */ DBMS_DEFAULT_BUFFER_SIZE,
|
/* buf_size */ DBMS_DEFAULT_BUFFER_SIZE,
|
||||||
/* settings */ {});
|
/* settings */ {});
|
||||||
|
|
||||||
write_created = true;
|
writeString(path.string(), *buf);
|
||||||
|
fiu_do_on(FailPoints::plain_object_storage_write_fail_on_directory_create, {
|
||||||
|
throw Exception(ErrorCodes::FAULT_INJECTED, "Injecting fault when creating '{}' directory", path);
|
||||||
|
});
|
||||||
|
buf->finalize();
|
||||||
|
|
||||||
|
auto event = object_storage->getMetadataStorageMetrics().directory_created;
|
||||||
|
ProfileEvents::increment(event);
|
||||||
{
|
{
|
||||||
std::lock_guard lock(path_map.mutex);
|
std::lock_guard lock(path_map.mutex);
|
||||||
auto & map = path_map.map;
|
auto & map = path_map.map;
|
||||||
@ -83,33 +97,19 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std:
|
|||||||
}
|
}
|
||||||
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
||||||
CurrentMetrics::add(metric, 1);
|
CurrentMetrics::add(metric, 1);
|
||||||
|
|
||||||
writeString(path.string(), *buf);
|
|
||||||
buf->finalize();
|
|
||||||
|
|
||||||
write_finalized = true;
|
|
||||||
|
|
||||||
auto event = object_storage->getMetadataStorageMetrics().directory_created;
|
|
||||||
ProfileEvents::increment(event);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
||||||
{
|
{
|
||||||
auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix);
|
LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageCreateDirectoryOperation"), "Undoing '{}' directory creation", path);
|
||||||
|
|
||||||
if (write_finalized)
|
|
||||||
{
|
|
||||||
const auto base_path = path.parent_path();
|
const auto base_path = path.parent_path();
|
||||||
|
if (path_map.removePathIfExists(base_path))
|
||||||
{
|
{
|
||||||
std::lock_guard lock(path_map.mutex);
|
|
||||||
path_map.map.erase(base_path);
|
|
||||||
}
|
|
||||||
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
||||||
CurrentMetrics::sub(metric, 1);
|
CurrentMetrics::sub(metric, 1);
|
||||||
|
|
||||||
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
|
|
||||||
}
|
}
|
||||||
else if (write_created)
|
|
||||||
|
auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix);
|
||||||
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
|
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -184,8 +184,10 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u
|
|||||||
getLogger("MetadataStorageFromPlainObjectStorageMoveDirectoryOperation"), "Moving directory '{}' to '{}'", path_from, path_to);
|
getLogger("MetadataStorageFromPlainObjectStorageMoveDirectoryOperation"), "Moving directory '{}' to '{}'", path_from, path_to);
|
||||||
|
|
||||||
auto write_buf = createWriteBuf(path_from, path_to, /* validate_content */ true);
|
auto write_buf = createWriteBuf(path_from, path_to, /* validate_content */ true);
|
||||||
write_created = true;
|
|
||||||
writeString(path_to.string(), *write_buf);
|
writeString(path_to.string(), *write_buf);
|
||||||
|
fiu_do_on(FailPoints::plain_object_storage_write_fail_on_directory_move, {
|
||||||
|
throw Exception(ErrorCodes::FAULT_INJECTED, "Injecting fault when moving from '{}' to '{}'", path_from, path_to);
|
||||||
|
});
|
||||||
write_buf->finalize();
|
write_buf->finalize();
|
||||||
|
|
||||||
/// parent_path() removes the trailing '/'.
|
/// parent_path() removes the trailing '/'.
|
||||||
@ -206,14 +208,13 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u
|
|||||||
void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
||||||
{
|
{
|
||||||
if (write_finalized)
|
if (write_finalized)
|
||||||
|
{
|
||||||
{
|
{
|
||||||
std::lock_guard lock(path_map.mutex);
|
std::lock_guard lock(path_map.mutex);
|
||||||
auto & map = path_map.map;
|
auto & map = path_map.map;
|
||||||
map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped());
|
map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (write_created)
|
|
||||||
{
|
|
||||||
auto write_buf = createWriteBuf(path_to, path_from, /* verify_content */ false);
|
auto write_buf = createWriteBuf(path_to, path_from, /* verify_content */ false);
|
||||||
writeString(path_from.string(), *write_buf);
|
writeString(path_from.string(), *write_buf);
|
||||||
write_buf->finalize();
|
write_buf->finalize();
|
||||||
@ -249,26 +250,31 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std:
|
|||||||
auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME);
|
auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME);
|
||||||
object_storage->removeObjectIfExists(metadata_object);
|
object_storage->removeObjectIfExists(metadata_object);
|
||||||
|
|
||||||
|
if (path_map.removePathIfExists(base_path))
|
||||||
{
|
{
|
||||||
std::lock_guard lock(path_map.mutex);
|
|
||||||
auto & map = path_map.map;
|
|
||||||
map.erase(base_path);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
||||||
CurrentMetrics::sub(metric, 1);
|
CurrentMetrics::sub(metric, 1);
|
||||||
|
|
||||||
removed = true;
|
|
||||||
|
|
||||||
auto event = object_storage->getMetadataStorageMetrics().directory_removed;
|
auto event = object_storage->getMetadataStorageMetrics().directory_removed;
|
||||||
ProfileEvents::increment(event);
|
ProfileEvents::increment(event);
|
||||||
|
}
|
||||||
|
|
||||||
|
remove_attempted = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
|
||||||
{
|
{
|
||||||
if (!removed)
|
if (!remove_attempted)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
{
|
||||||
|
std::lock_guard lock(path_map.mutex);
|
||||||
|
auto & map = path_map.map;
|
||||||
|
map.emplace(path.parent_path(), key_prefix);
|
||||||
|
}
|
||||||
|
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
||||||
|
CurrentMetrics::add(metric, 1);
|
||||||
|
|
||||||
auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix);
|
auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix);
|
||||||
auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME);
|
auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME);
|
||||||
auto buf = object_storage->writeObject(
|
auto buf = object_storage->writeObject(
|
||||||
@ -279,14 +285,6 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un
|
|||||||
/* settings */ {});
|
/* settings */ {});
|
||||||
writeString(path.string(), *buf);
|
writeString(path.string(), *buf);
|
||||||
buf->finalize();
|
buf->finalize();
|
||||||
|
|
||||||
{
|
|
||||||
std::lock_guard lock(path_map.mutex);
|
|
||||||
auto & map = path_map.map;
|
|
||||||
map.emplace(path.parent_path(), std::move(key_prefix));
|
|
||||||
}
|
|
||||||
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
|
|
||||||
CurrentMetrics::add(metric, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
MetadataStorageFromPlainObjectStorageWriteFileOperation::MetadataStorageFromPlainObjectStorageWriteFileOperation(
|
MetadataStorageFromPlainObjectStorageWriteFileOperation::MetadataStorageFromPlainObjectStorageWriteFileOperation(
|
||||||
|
@@ -19,9 +19,6 @@ private:
     const std::string metadata_key_prefix;
     const std::string object_key_prefix;

-    bool write_created = false;
-    bool write_finalized = false;
-
 public:
     MetadataStorageFromPlainObjectStorageCreateDirectoryOperation(
         /// path_ must end with a trailing '/'.
|
|||||||
ObjectStoragePtr object_storage;
|
ObjectStoragePtr object_storage;
|
||||||
const std::string metadata_key_prefix;
|
const std::string metadata_key_prefix;
|
||||||
|
|
||||||
bool write_created = false;
|
|
||||||
bool write_finalized = false;
|
bool write_finalized = false;
|
||||||
|
|
||||||
std::unique_ptr<WriteBufferFromFileBase>
|
std::unique_ptr<WriteBufferFromFileBase>
|
||||||
@ -73,7 +69,7 @@ private:
|
|||||||
const std::string metadata_key_prefix;
|
const std::string metadata_key_prefix;
|
||||||
|
|
||||||
std::string key_prefix;
|
std::string key_prefix;
|
||||||
bool removed = false;
|
bool remove_attempted = false;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation(
|
MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation(
|
||||||
|
@@ -26,14 +26,6 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast)
 {
     chassert(ast);

-    if (const auto * function = ast->template as<ASTFunction>())
-    {
-        std::unordered_set<std::string> udf_in_replace_process;
-        auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process);
-        if (replace_result)
-            ast = replace_result;
-    }
-
     for (auto & child : ast->children)
     {
         if (!child)

@@ -48,6 +40,14 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast)
         if (new_ptr != old_ptr)
             ast->updatePointerToChild(old_ptr, new_ptr);
     }
+
+    if (const auto * function = ast->template as<ASTFunction>())
+    {
+        std::unordered_set<std::string> udf_in_replace_process;
+        auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process);
+        if (replace_result)
+            ast = replace_result;
+    }
 }

 void UserDefinedSQLFunctionVisitor::visit(IAST * ast)
@@ -32,9 +32,8 @@ public:

     // Format message with fmt::format, like the logging functions.
     template <typename... Args>
-    S3Exception(Aws::S3::S3Errors code_, fmt::format_string<Args...> fmt, Args &&... args)
-        : Exception(fmt::format(fmt, std::forward<Args>(args)...), ErrorCodes::S3_ERROR)
-        , code(code_)
+    S3Exception(Aws::S3::S3Errors code_, FormatStringHelper<Args...> fmt, Args &&... args)
+        : Exception(PreformattedMessage{fmt.format(std::forward<Args>(args)...)}, ErrorCodes::S3_ERROR), code(code_)
     {
     }
@@ -1451,7 +1451,7 @@ String fourSpaceIndent(size_t indent);

 bool inline isWritingToTerminal(const WriteBuffer & buf)
 {
-    const auto * write_buffer_to_descriptor = typeid_cast<const WriteBufferFromFileDescriptor *>(&buf);
+    const auto * write_buffer_to_descriptor = dynamic_cast<const WriteBufferFromFileDescriptor *>(&buf);
     return write_buffer_to_descriptor && write_buffer_to_descriptor->getFD() == STDOUT_FILENO && isatty(STDOUT_FILENO);
 }
@@ -70,7 +70,7 @@ struct JoinedElement
     join->strictness = JoinStrictness::All;

     join->on_expression = on_expression;
-    join->children.push_back(join->on_expression);
+    join->children = {join->on_expression};
     return true;
 }
@@ -13,6 +13,7 @@
 #include <Core/Settings.h>

 #include <numeric>
+#include <shared_mutex>
 #include <fmt/format.h>
|
|||||||
Block nextImpl() override
|
Block nextImpl() override
|
||||||
{
|
{
|
||||||
ExtraBlockPtr not_processed = nullptr;
|
ExtraBlockPtr not_processed = nullptr;
|
||||||
|
std::shared_lock shared(eof_mutex);
|
||||||
|
|
||||||
{
|
{
|
||||||
std::lock_guard lock(extra_block_mutex);
|
std::lock_guard lock(extra_block_mutex);
|
||||||
@ -557,7 +559,24 @@ public:
|
|||||||
block = left_reader.read();
|
block = left_reader.read();
|
||||||
if (!block)
|
if (!block)
|
||||||
{
|
{
|
||||||
return {};
|
shared.unlock();
|
||||||
|
bool there_are_still_might_be_rows_to_process = false;
|
||||||
|
{
|
||||||
|
/// The following race condition could happen without this mutex:
|
||||||
|
/// * we're called from `IBlocksStream::next()`
|
||||||
|
/// * another thread just read the last block from `left_reader` and now is in the process of or about to call `joinBlock()`
|
||||||
|
/// * it might be that `joinBlock()` will leave some rows in the `not_processed`
|
||||||
|
/// * but if the current thread will return now an empty block `finished` will be set to true in `IBlocksStream::next()` and
|
||||||
|
/// these not processed rows will be lost
|
||||||
|
/// So we shouldn't finish execution while there is at least one in-flight `joinBlock()` call. Let's wait until we're alone
|
||||||
|
/// and double check if there are any not processed rows left.
|
||||||
|
std::unique_lock exclusive(eof_mutex);
|
||||||
|
|
||||||
|
std::lock_guard lock(extra_block_mutex);
|
||||||
|
if (!not_processed_blocks.empty())
|
||||||
|
there_are_still_might_be_rows_to_process = true;
|
||||||
|
}
|
||||||
|
return there_are_still_might_be_rows_to_process ? nextImpl() : Block();
|
||||||
}
|
}
|
||||||
|
|
||||||
// block comes from left_reader, need to join with right table to get the result.
|
// block comes from left_reader, need to join with right table to get the result.
|
||||||
@ -592,7 +611,7 @@ public:
|
|||||||
return block;
|
return block;
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t current_bucket;
|
const size_t current_bucket;
|
||||||
Buckets buckets;
|
Buckets buckets;
|
||||||
InMemoryJoinPtr hash_join;
|
InMemoryJoinPtr hash_join;
|
||||||
|
|
||||||
@ -603,6 +622,8 @@ public:
|
|||||||
|
|
||||||
std::mutex extra_block_mutex;
|
std::mutex extra_block_mutex;
|
||||||
std::list<ExtraBlockPtr> not_processed_blocks TSA_GUARDED_BY(extra_block_mutex);
|
std::list<ExtraBlockPtr> not_processed_blocks TSA_GUARDED_BY(extra_block_mutex);
|
||||||
|
|
||||||
|
std::shared_mutex eof_mutex;
|
||||||
};
|
};
|
||||||
|
|
||||||
IBlocksStreamPtr GraceHashJoin::getDelayedBlocks()
|
IBlocksStreamPtr GraceHashJoin::getDelayedBlocks()
|
||||||
|
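The race addressed by eof_mutex is only reachable when the grace hash join algorithm runs with multiple reader threads; a sketch of a query that takes this code path (table names are hypothetical):

SET join_algorithm = 'grace_hash';
SELECT count() FROM t1 INNER JOIN t2 ON t1.key = t2.key;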
@@ -818,18 +818,18 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
     {
         IndexDescription index_desc = IndexDescription::getIndexFromAST(index->clone(), properties.columns, getContext());
         if (properties.indices.has(index_desc.name))
-            throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use different index names.", backQuoteIfNeed(index_desc.name));
+            throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use a different index name", backQuoteIfNeed(index_desc.name));

         const auto & settings = getContext()->getSettingsRef();
         if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings[Setting::allow_experimental_full_text_index])
-            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is disabled. Turn on setting 'allow_experimental_full_text_index'");
+            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "The experimental full-text index feature is disabled. Enable the setting 'allow_experimental_full_text_index' to use it");
         /// ----
         /// Temporary check during a transition period. Please remove at the end of 2024.
         if (index_desc.type == INVERTED_INDEX_NAME && !settings[Setting::allow_experimental_inverted_index])
-            throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'");
+            throw Exception(ErrorCodes::ILLEGAL_INDEX, "The 'inverted' index type is deprecated. Please use the 'full_text' index type instead");
         /// ----
         if (index_desc.type == "vector_similarity" && !settings[Setting::allow_experimental_vector_similarity_index])
-            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental vector similarity index is disabled. Turn on setting 'allow_experimental_vector_similarity_index'");
+            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "The experimental vector similarity index feature is disabled. Enable the setting 'allow_experimental_vector_similarity_index' to use it");

         properties.indices.push_back(index_desc);
     }
|
|||||||
|
|
||||||
void ASTColumnDeclaration::forEachPointerToChild(std::function<void(void **)> f)
|
void ASTColumnDeclaration::forEachPointerToChild(std::function<void(void **)> f)
|
||||||
{
|
{
|
||||||
f(reinterpret_cast<void **>(&default_expression));
|
auto visit_child = [&f](ASTPtr & member)
|
||||||
f(reinterpret_cast<void **>(&comment));
|
{
|
||||||
f(reinterpret_cast<void **>(&codec));
|
IAST * new_member_ptr = member.get();
|
||||||
f(reinterpret_cast<void **>(&statistics_desc));
|
f(reinterpret_cast<void **>(&new_member_ptr));
|
||||||
f(reinterpret_cast<void **>(&ttl));
|
if (new_member_ptr != member.get())
|
||||||
f(reinterpret_cast<void **>(&collation));
|
{
|
||||||
f(reinterpret_cast<void **>(&settings));
|
if (new_member_ptr)
|
||||||
|
member = new_member_ptr->ptr();
|
||||||
|
else
|
||||||
|
member.reset();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
visit_child(default_expression);
|
||||||
|
visit_child(comment);
|
||||||
|
visit_child(codec);
|
||||||
|
visit_child(statistics_desc);
|
||||||
|
visit_child(ttl);
|
||||||
|
visit_child(collation);
|
||||||
|
visit_child(settings);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -61,6 +61,29 @@ ASTPtr ASTTableJoin::clone() const
     return res;
 }

+void ASTTableJoin::forEachPointerToChild(std::function<void(void **)> f)
+{
+    IAST * new_using_expression_list = using_expression_list.get();
+    f(reinterpret_cast<void **>(&new_using_expression_list));
+    if (new_using_expression_list != using_expression_list.get())
+    {
+        if (new_using_expression_list)
+            using_expression_list = new_using_expression_list->ptr();
+        else
+            using_expression_list.reset();
+    }
+
+    IAST * new_on_expression = on_expression.get();
+    f(reinterpret_cast<void **>(&new_on_expression));
+    if (new_on_expression != on_expression.get())
+    {
+        if (new_on_expression)
+            on_expression = new_on_expression->ptr();
+        else
+            on_expression.reset();
+    }
+}
+
 void ASTArrayJoin::updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const
 {
     hash_state.update(kind);

@@ -80,6 +80,9 @@ struct ASTTableJoin : public IAST
     void formatImplAfterTable(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const;
     void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
     void updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const override;
+
+protected:
+    void forEachPointerToChild(std::function<void(void **)> f) override;
 };

 /// Specification of ARRAY JOIN.
@@ -233,6 +233,7 @@ namespace MergeTreeSetting
     extern const MergeTreeSettingsString storage_policy;
     extern const MergeTreeSettingsFloat zero_copy_concurrent_part_removal_max_postpone_ratio;
     extern const MergeTreeSettingsUInt64 zero_copy_concurrent_part_removal_max_split_times;
+    extern const MergeTreeSettingsBool enforce_index_structure_match_on_partition_manipulation;
     extern const MergeTreeSettingsBool prewarm_mark_cache;
 }
|
|||||||
|
|
||||||
if (query_to_string(my_snapshot->getPrimaryKeyAST()) != query_to_string(src_snapshot->getPrimaryKeyAST()))
|
if (query_to_string(my_snapshot->getPrimaryKeyAST()) != query_to_string(src_snapshot->getPrimaryKeyAST()))
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Tables have different primary key");
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Tables have different primary key");
|
||||||
|
const auto check_definitions = [this](const auto & my_descriptions, const auto & src_descriptions)
|
||||||
const auto check_definitions = [](const auto & my_descriptions, const auto & src_descriptions)
|
|
||||||
{
|
{
|
||||||
if (my_descriptions.size() != src_descriptions.size())
|
bool strict_match = (*getSettings())[MergeTreeSetting::enforce_index_structure_match_on_partition_manipulation];
|
||||||
|
if ((my_descriptions.size() < src_descriptions.size()) ||
|
||||||
|
(strict_match && my_descriptions.size() != src_descriptions.size()))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
std::unordered_set<std::string> my_query_strings;
|
std::unordered_set<std::string> my_query_strings;
|
||||||
|
@@ -100,6 +100,7 @@ namespace ErrorCodes
     DECLARE(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \
     DECLARE(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \
     DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \
+    DECLARE(Bool, enforce_index_structure_match_on_partition_manipulation, false, "If this setting is enabled for destination table of a partition manipulation query (`ATTACH/MOVE/REPLACE PARTITION`), the indices and projections must be identical between the source and destination tables. Otherwise, the destination table can have a superset of the source table's indices and projections.", 0) \
     DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \
     DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \
     DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \
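The new MergeTree-level setting can be inspected and enabled per table; the system query below mirrors the commented-out check in the integration test at the end of this changeset (the table name is hypothetical):

SELECT value FROM system.merge_tree_settings
WHERE name = 'enforce_index_structure_match_on_partition_manipulation';

ALTER TABLE dst MODIFY SETTING enforce_index_structure_match_on_partition_manipulation = 1;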
@@ -56,7 +56,9 @@ LABEL_CATEGORIES = {
         "Bug Fix (user-visible misbehaviour in official stable or prestable release)",
         "Bug Fix (user-visible misbehavior in official stable or prestable release)",
     ],
-    "pr-critical-bugfix": ["Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)"],
+    "pr-critical-bugfix": [
+        "Critical Bug Fix (crash, data loss, RBAC) or LOGICAL_ERROR"
+    ],
     "pr-build": [
         "Build/Testing/Packaging Improvement",
         "Build Improvement",
@@ -0,0 +1,5 @@
<clickhouse>
    <merge_tree>
        <enforce_index_structure_match_on_partition_manipulation>true</enforce_index_structure_match_on_partition_manipulation>
    </merge_tree>
</clickhouse>

@@ -0,0 +1,5 @@
<clickhouse>
    <merge_tree>
        <enforce_index_structure_match_on_partition_manipulation>false</enforce_index_structure_match_on_partition_manipulation>
    </merge_tree>
</clickhouse>
@ -0,0 +1,473 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from helpers.client import QueryRuntimeException
|
||||||
|
from helpers.cluster import ClickHouseCluster
|
||||||
|
|
||||||
|
cluster = ClickHouseCluster(__file__)
|
||||||
|
node1 = cluster.add_instance(
|
||||||
|
"node1", main_configs=["configs/config_with_check_table_structure_completely.xml"]
|
||||||
|
)
|
||||||
|
# node1 = cluster.add_instance("node1")
|
||||||
|
node2 = cluster.add_instance(
|
||||||
|
"node2",
|
||||||
|
main_configs=["configs/config_without_check_table_structure_completely.xml"],
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope="module")
|
||||||
|
def start_cluster():
|
||||||
|
try:
|
||||||
|
cluster.start()
|
||||||
|
yield cluster
|
||||||
|
finally:
|
||||||
|
cluster.shutdown()
|
||||||
|
|
||||||
|
|
||||||
|
# def test_setting_check_table_structure_completely(start_cluster):
|
||||||
|
# assert node1.query("""select value from system.merge_tree_settings where name='enforce_index_structure_match_on_partition_manipulation';""") == "0\n"
|
||||||
|
def test_check_completely_attach_with_different_indices(start_cluster):
|
||||||
|
node1.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t1
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
node1.query(
|
||||||
|
"INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
|
||||||
|
)
|
||||||
|
node1.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t2
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE bloom_filter GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
# serverError 36
|
||||||
|
with pytest.raises(QueryRuntimeException) as exc:
|
||||||
|
node1.query(
|
||||||
|
"ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
|
||||||
|
)
|
||||||
|
assert "Tables have different secondary indices" in str(exc.value)
|
||||||
|
node1.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t3
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
|
||||||
|
INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
# serverError 36
|
||||||
|
with pytest.raises(QueryRuntimeException) as exc:
|
||||||
|
node1.query(
|
||||||
|
"ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
|
||||||
|
)
|
||||||
|
assert "Tables have different secondary indices" in str(exc.value)
|
||||||
|
node1.query("DROP TABLE attach_partition_t1")
|
||||||
|
node1.query("DROP TABLE attach_partition_t2")
|
||||||
|
node1.query("DROP TABLE attach_partition_t3")
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_attach_with_different_indices(start_cluster):
|
||||||
|
node2.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t1
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
node2.query(
|
||||||
|
"INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
|
||||||
|
)
|
||||||
|
node2.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t2
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE bloom_filter GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
# serverError 36
|
||||||
|
with pytest.raises(QueryRuntimeException) as exc:
|
||||||
|
node2.query(
|
||||||
|
"ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
|
||||||
|
)
|
||||||
|
assert "Tables have different secondary indices" in str(exc.value)
|
||||||
|
node2.query(
|
||||||
|
"""
|
||||||
|
CREATE TABLE attach_partition_t3
|
||||||
|
(
|
||||||
|
`a` UInt32,
|
||||||
|
`b` String,
|
||||||
|
`c` String,
|
||||||
|
INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
|
||||||
|
INDEX cf c TYPE bloom_filter GRANULARITY 1
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY a
|
||||||
|
"""
|
||||||
|
)
|
||||||
|
node2.query(
|
||||||
|
"ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
|
||||||
|
)
|
||||||
|
assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"
|
||||||
|
assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `b` = '1'") == "1\n"
|
||||||
|
assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `c` = '1'") == "1\n"
|
||||||
|
node2.query("DROP TABLE attach_partition_t1")
|
||||||
|
node2.query("DROP TABLE attach_partition_t2")
|
||||||
|
node2.query("DROP TABLE attach_partition_t3")
|
||||||
|
|
||||||
|
|
||||||
|

def test_check_completely_attach_with_different_projections(start_cluster):
    node1.query(
        """
        CREATE TABLE attach_partition_t1
        (
            `a` UInt32,
            `b` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node1.query(
        "INSERT INTO attach_partition_t1 SELECT number, toString(number) FROM numbers(10);"
    )
    node1.query(
        """
        CREATE TABLE attach_partition_t2
        (
            `a` UInt32,
            `b` String,
            PROJECTION differently_named_proj (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a;
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node1.query(
            "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different projections" in str(exc.value)

    node1.query(
        """
        CREATE TABLE attach_partition_t3
        (
            `a` UInt32,
            `b` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            PROJECTION proj2 (
                SELECT
                    b,
                    avg(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node1.query(
            "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different projections" in str(exc.value)

    node1.query("DROP TABLE attach_partition_t1")
    node1.query("DROP TABLE attach_partition_t2")
    node1.query("DROP TABLE attach_partition_t3")

def test_check_attach_with_different_projections(start_cluster):
    node2.query(
        """
        CREATE TABLE attach_partition_t1
        (
            `a` UInt32,
            `b` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node2.query(
        "INSERT INTO attach_partition_t1 SELECT number, toString(number) FROM numbers(10);"
    )
    node2.query(
        """
        CREATE TABLE attach_partition_t2
        (
            `a` UInt32,
            `b` String,
            PROJECTION differently_named_proj (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a;
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node2.query(
            "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different projections" in str(exc.value)

    node2.query(
        """
        CREATE TABLE attach_partition_t3
        (
            `a` UInt32,
            `b` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            PROJECTION proj2 (
                SELECT
                    b,
                    avg(a)
                GROUP BY b
            )
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node2.query(
        "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
    )

    assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"

    node2.query("DROP TABLE attach_partition_t1")
    node2.query("DROP TABLE attach_partition_t2")
    node2.query("DROP TABLE attach_partition_t3")
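The projection tests mirror the index tests: node1 demands identical projection definitions, while node2 accepts a destination whose projections are a superset of the source's (proj1 plus the new proj2). To confirm that the shared projection actually serves the aggregation it covers after the attach, a hedged sketch using the force_optimize_projection setting, which makes the query fail unless a projection is used:

SELECT b, sum(a)
FROM attach_partition_t3
GROUP BY b
SETTINGS force_optimize_projection = 1;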

def test_check_completely_attach_with_different_indices_and_projections(start_cluster):
    node1.query(
        """
        CREATE TABLE attach_partition_t1
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node1.query(
        "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
    )
    node1.query(
        """
        CREATE TABLE attach_partition_t2
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            INDEX bf b TYPE bloom_filter GRANULARITY 1,
            INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node1.query(
            "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different secondary indices" in str(exc.value)

    node1.query(
        """
        CREATE TABLE attach_partition_t3
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            PROJECTION proj2 (
                SELECT
                    b,
                    avg(a)
                GROUP BY b
            ),
            INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
            INDEX cf c TYPE bloom_filter GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node1.query(
            "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different secondary indices" in str(exc.value)

    node1.query("DROP TABLE attach_partition_t1")
    node1.query("DROP TABLE attach_partition_t2")
    node1.query("DROP TABLE attach_partition_t3")

def test_check_attach_with_different_indices_and_projections(start_cluster):
    node2.query(
        """
        CREATE TABLE attach_partition_t1
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node2.query(
        "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
    )
    node2.query(
        """
        CREATE TABLE attach_partition_t2
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            INDEX bf b TYPE bloom_filter GRANULARITY 1,
            INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )

    # serverError 36 (BAD_ARGUMENTS)
    with pytest.raises(QueryRuntimeException) as exc:
        node2.query(
            "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
    assert "Tables have different secondary indices" in str(exc.value)

    node2.query(
        """
        CREATE TABLE attach_partition_t3
        (
            `a` UInt32,
            `b` String,
            `c` String,
            PROJECTION proj1 (
                SELECT
                    b,
                    sum(a)
                GROUP BY b
            ),
            PROJECTION proj2 (
                SELECT
                    b,
                    avg(a)
                GROUP BY b
            ),
            INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
            INDEX cf c TYPE bloom_filter GRANULARITY 1
        )
        ENGINE = MergeTree
        ORDER BY a
        """
    )
    node2.query(
        "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
    )

    assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"
    assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `b` = '1'") == "1\n"
    assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `c` = '1'") == "1\n"

    node2.query("DROP TABLE attach_partition_t1")
    node2.query("DROP TABLE attach_partition_t2")
    node2.query("DROP TABLE attach_partition_t3")

@@ -0,0 +1,12 @@
1 2
2 2
3 1
4 7
5 10
6 12
1 2
2 2
3 1
4 7
5 10
6 12
62	tests/queries/0_stateless/03008_s3_plain_rewritable_fault.sh	Executable file
@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-shared-merge-tree, no-parallel
# Tag no-fasttest: requires S3
# Tag no-shared-merge-tree: does not support replication
# Tag no-parallel: uses failpoints

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

on_exit() {
    ${CLICKHOUSE_CLIENT} -m --query "
    SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;
    SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
    "
}

trap on_exit EXIT

${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_s3_mt_fault"

${CLICKHOUSE_CLIENT} --query "
CREATE TABLE test_s3_mt_fault (a Int32, b Int64) engine = MergeTree() ORDER BY tuple(a, b)
SETTINGS disk = disk(
    name = 03008_s3_plain_rewritable_fault,
    type = s3_plain_rewritable,
    endpoint = 'http://localhost:11111/test/03008_test_s3_mt_fault/',
    access_key_id = clickhouse,
    secret_access_key = clickhouse);
"

${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) VALUES (1, 2), (2, 2), (3, 1), (4, 7), (5, 10), (6, 12);
OPTIMIZE TABLE test_s3_mt_fault FINAL;
"

${CLICKHOUSE_CLIENT} --query "
SYSTEM ENABLE FAILPOINT plain_object_storage_write_fail_on_directory_create
"

${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) select number, number from numbers_mt(100)" 2>&1 | grep -Fq "FAULT_INJECTED"

${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_s3_mt_fault;"

${CLICKHOUSE_CLIENT} --query "
SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;
SYSTEM ENABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
"

${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) select number, number from numbers_mt(100);
" 2>&1 | grep -Fq "FAULT_INJECTED"

${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_s3_mt_fault;"

${CLICKHOUSE_CLIENT} --query "
SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
"

# Filter out 'Removing temporary directory' because the fault injection prevents directory rename.
${CLICKHOUSE_CLIENT} --query "DROP TABLE test_s3_mt_fault SYNC" 2>&1 | grep -v 'Removing temporary directory' ||:
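Both fault-injected INSERTs are expected to fail atomically; the script checks this indirectly by selecting all rows after each failure, and the reference file above repeats the same six rows twice. An explicit row-count assertion would state the expectation more directly; a small hedged addition, reusing the table from the script:

SELECT count() FROM test_s3_mt_fault; -- expected: 6, only the initial insert survived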
@@ -1,4 +1,4 @@
--- Tags: no-parallel
+-- Tags: no-fasttest, no-parallel
 -- Tag no-parallel -- due to failpoints

 create table data_r1 (key Int, value String) engine=ReplicatedMergeTree('/tables/{database}/data', '{table}') order by tuple();
@@ -0,0 +1,7 @@
SET join_algorithm = 'parallel_hash';
SET allow_experimental_dynamic_type = 1;
DROP TABLE IF EXISTS t0;
CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory();
SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0;
DROP TABLE t0;
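The table is left empty on purpose: the SELECT returns no rows, and the test only has to survive building a parallel_hash join on a Tuple key that contains a Dynamic column, presumably as a regression check against a crash or server error. A hedged variant that also exercises key comparison, assuming the tuple literals cast implicitly and Dynamic values are comparable in the build under test:

INSERT INTO t0 VALUES ((1, 'a')), ((2, 42));
SELECT count() FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0; -- each row matches only itself: 2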
@@ -0,0 +1 @@
180 15
@@ -0,0 +1,15 @@
DROP TABLE IF EXISTS t0;
DROP TABLE IF EXISTS t1;

CREATE TABLE t0 (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO t0 SELECT number FROM numbers(20);

CREATE TABLE t1 (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO t1 SELECT number FROM numbers(5, 20);

SET max_joined_block_size_rows = 1;
SET grace_hash_join_initial_buckets = 2;
SET join_algorithm = 'grace_hash';

SELECT sum(x), count() FROM t0 JOIN t1 USING x;
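The expected output '180 15' can be checked by hand: t0 holds 0..19 and t1 holds 5..24, so the shared join keys are 5..19, that is 15 rows with sum (5 + 19) * 15 / 2 = 180. The same arithmetic as a one-liner:

SELECT sum(number), count() FROM numbers(5, 15); -- 180	15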
36	tests/queries/0_stateless/03274_pretty_output_coloring.expect	Executable file
@@ -0,0 +1,36 @@
#!/usr/bin/expect -f

set basedir [file dirname $argv0]
set basename [file tail $argv0]
if {[info exists env(CLICKHOUSE_TMP)]} {
    set CLICKHOUSE_TMP $env(CLICKHOUSE_TMP)
} else {
    set CLICKHOUSE_TMP "."
}
exp_internal -f $CLICKHOUSE_TMP/$basename.debuglog 0
set history_file $CLICKHOUSE_TMP/$basename.history

log_user 0
set timeout 60
match_max 100000

expect_after {
    # Do not ignore eof from expect
    -i $any_spawn_id eof { exp_continue }
    # A default timeout action is to do nothing, change it to fail
    -i $any_spawn_id timeout { exit 1 }
}

# useful debugging configuration
# exp_internal 1

spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion --enable-progress-table-toggle=0 --highlight 0 --history_file=$history_file"
expect ":) "

# Make a query
send -- "SELECT 1 as Hello\r"
expect -re "\\\[1mHello.*\\\[90m1\\\."
expect ":) "

send -- "exit\r"
expect eof
7	tests/queries/0_stateless/03274_udf_in_join.reference	Normal file
@@ -0,0 +1,7 @@
SELECT 1
FROM
(
    SELECT 1 AS c0
) AS v0
ALL INNER JOIN v0 AS vx ON c0 = vx.c0
1
21	tests/queries/0_stateless/03274_udf_in_join.sh	Executable file
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

$CLICKHOUSE_CLIENT -q "
CREATE VIEW v0 AS SELECT 1 AS c0;
CREATE FUNCTION ${CLICKHOUSE_DATABASE}_second AS (x, y) -> y;
CREATE FUNCTION ${CLICKHOUSE_DATABASE}_equals AS (x, y) -> x = y;
SET optimize_rewrite_array_exists_to_has = 1;

EXPLAIN SYNTAX SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_second(v0.c0, vx.c0); -- { serverError INVALID_JOIN_ON_EXPRESSION }
EXPLAIN SYNTAX SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_equals(v0.c0, vx.c0);

SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_equals(v0.c0, vx.c0);

DROP VIEW v0;
DROP FUNCTION ${CLICKHOUSE_DATABASE}_second;
DROP FUNCTION ${CLICKHOUSE_DATABASE}_equals;
"
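What the test pins down is how SQL user-defined functions are inlined inside JOIN ON: _second, defined as (x, y) -> y, inlines to a bare column reference, which is not a valid join predicate, while _equals, defined as (x, y) -> x = y, inlines to the equality shown in the reference file (ALL INNER JOIN v0 AS vx ON c0 = vx.c0). The presumed post-inlining forms, written out as plain queries (a sketch, not the literal rewrite):

SELECT 1 FROM v0 JOIN v0 vx ON vx.c0; -- not an equality, fails with INVALID_JOIN_ON_EXPRESSION
SELECT 1 FROM v0 JOIN v0 vx ON v0.c0 = vx.c0; -- valid equi-join, matches the reference output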
@@ -1,4 +1,4 @@
-personal_ws-1.1 en 2984
+personal_ws-1.1 en 2985
 AArch
 ACLs
 ALTERs
@@ -2825,6 +2825,7 @@ summapwithoverflow
 summingmergetree
 sumwithoverflow
 superaggregates
+superset
 supertype
 supremum
 symlink