Compare commits

...

66 Commits

Author SHA1 Message Date
Dmitry Novik
715d7ac31b
Merge c4b108b2b2 into de13b819f0 2024-11-24 03:10:41 +08:00
Raúl Marín
de13b819f0
Merge pull request #72319 from ClickHouse/revert-71774-enable_http_compression_default
Revert "Set enable_http_compression default value to 1"
2024-11-23 15:59:24 +00:00
Antonio Andelic
558f639f2a
Merge pull request #72283 from ClickHouse/make-terminal-beautiful-again
Apply colors correctly to terminal output
2024-11-23 15:27:04 +00:00
Julia Kartseva
d8de6d1dce
Merge pull request #71439 from jkartseva/fix-transaction-rollback-plain-rw
Fix transaction rollback if `WriteBuffer::finalize` fails in plain_rewritable disk during directory creation
2024-11-23 06:20:27 +00:00
Julia Kartseva
559ea73045 fix flaky test 2024-11-23 01:29:46 +00:00
Julia Kartseva
e192d6c558 better 2024-11-23 01:29:46 +00:00
Julia Kartseva
16f3447ef9 fix and simplify 2024-11-23 01:29:46 +00:00
Julia Kartseva
208e96f360 add plain_object_storage_write_fail_on_directory_move fault injection 2024-11-23 01:29:46 +00:00
Julia Kartseva
8de4ecf513 add failpoint and test 2024-11-23 01:29:46 +00:00
Julia Kartseva
9602e338ad address feedback
Introduce removePathIfExists method.
2024-11-23 01:29:46 +00:00
Julia Kartseva
f3cd9d4c72 fix transaction rollback when file write finalize fails 2024-11-23 01:29:46 +00:00
Alexey Milovidov
904be0a2e8
Merge pull request #72254 from rschu1ze/usearch-clang-19
Enable SimSIMD on ARM with Clang 19
2024-11-23 00:19:15 +00:00
Alexey Milovidov
8baa1bea43
Merge pull request #72291 from Algunenano/s3Exception
00002_log_and_exception_messages_formatting: Use PreformattedMessage in S3Exception
2024-11-23 00:16:51 +00:00
Han Fei
cb582abbb5
Merge pull request #72292 from ClickHouse/hanfei/disable-shared-set-join
disable a cloud setting
2024-11-22 22:01:27 +00:00
Raúl Marín
52391a8271
Revert "Set enable_http_compression default value to 1" 2024-11-22 22:51:28 +01:00
Nikita Taranov
c30f8aecd5
Merge pull request #72233 from ClickHouse/fix_grace_hash_race
Fix race in `GraceHashJoin`
2024-11-22 21:14:38 +00:00
Nikita Taranov
559c4c0b17
Merge pull request #72278 from ClickHouse/fix_dyn_col_race
Fix race in `ColumnDynamic::dumpStructure()`
2024-11-22 20:03:30 +00:00
Raúl Marín
6c7cde5702 Better 2024-11-22 19:56:07 +01:00
Han Fei
34bf8d3b2c
Merge branch 'master' into hanfei/disable-shared-set-join 2024-11-22 18:16:45 +01:00
Han Fei
ff6f10843f disable a cloud setting 2024-11-22 17:15:16 +00:00
Raúl Marín
4ac9aa255b
Merge pull request #72179 from Algunenano/fix_udf_in_join_on
Fix bugs when using UDF in join on expression with the old analyzer
2024-11-22 17:14:08 +00:00
Raúl Marín
0ca97d3036 Use PreformattedMessage in S3Exception 2024-11-22 18:08:00 +01:00
Vladimir Cherkasov
26de447039
Merge pull request #70603 from zwy991114/check-table-structure-completely-setting
Add setting enforce_index_structure_match_on_partition_manipu..
2024-11-22 16:30:34 +00:00
Antonio Andelic
844513b1d0 correctly cast write buffer 2024-11-22 16:16:22 +01:00
Robert Schulze
93dc65ad88
Can we enable SimSIMD on ARM with Clang 19? 2024-11-22 14:30:16 +00:00
Nikita Taranov
c3a32cc215 impl 2024-11-22 15:01:14 +01:00
Vladimir Cherkasov
ebcce35e8c
Update SettingsChangesHistory.cpp 2024-11-22 12:53:30 +01:00
Vladimir Cherkasov
0d65fbf0e2
enforce_index_structure_match_on_partition_manipulation 24.11 2024-11-22 12:45:29 +01:00
Nikita Taranov
ed45abbc1a add test 2024-11-22 11:42:21 +01:00
Nikita Taranov
e5cfc7daee impl 2024-11-22 00:00:26 +01:00
Raúl Marín
51356a4e56 This test is not fast 2024-11-21 13:18:38 +01:00
Raúl Marín
7baaf24fc3 Enable setting 2024-11-21 12:35:14 +01:00
Yakov Olkhovskiy
c4b108b2b2 fix 2024-11-20 20:30:57 +00:00
Yakov Olkhovskiy
ccedab3b78 fix style 2024-11-20 17:34:03 +00:00
Yakov Olkhovskiy
e5e99d09ab fix ConstantNode comparison 2024-11-20 17:18:06 +00:00
Raúl Marín
f6610790a8 Fix broken check 2024-11-20 18:13:44 +01:00
Raúl Marín
27fb90bb58 Fix bugs when using UDF in join on expression with the old analyzer 2024-11-20 17:37:32 +01:00
Yakov Olkhovskiy
00662106f8 better fix ConstantNode comparison 2024-11-20 16:26:14 +00:00
Yakov Olkhovskiy
c8ae99a975 fix ConstantNode comparison 2024-11-20 15:14:13 +00:00
Yakov Olkhovskiy
1ecbad4151 fix test 2024-11-19 09:05:53 +00:00
Yakov Olkhovskiy
de9416c7df fix unit tests 2024-11-19 05:47:42 +00:00
zhangwanyun1
b8c70bf059 Re run pipeline 2024-11-19 11:19:23 +08:00
Yakov Olkhovskiy
b670ccad80 fix test 2024-11-15 21:15:10 +00:00
Yakov Olkhovskiy
3f9565f41b fix test 2024-11-15 08:18:03 +00:00
Yakov Olkhovskiy
098c5c6a0b revert some changes 2024-11-15 06:17:30 +00:00
Yakov Olkhovskiy
cf460844e9 fix 2024-11-14 01:44:05 +00:00
Yakov Olkhovskiy
5eb44471c8 fix 2024-11-12 11:40:01 +00:00
Yakov Olkhovskiy
bd595a7ceb fix 2024-11-12 08:09:40 +00:00
Yakov Olkhovskiy
fdae103fc1 fix 2024-11-12 03:12:19 +00:00
Yakov Olkhovskiy
a16071d6e0 fix - Field::safeGet() returns reference to temporary 2024-11-11 03:04:38 +00:00
Yakov Olkhovskiy
f7ff94c332 Merge branch 'master' into analyzer-constants-refactoring 2024-11-10 19:55:01 +00:00
Yakov Olkhovskiy
8c7b10e85e fix - Field::safeGet() returns reference to temporary 2024-11-10 19:25:09 +00:00
Vladimir Cherkasov
cdac4f600f
Merge branch 'master' into check-table-structure-completely-setting 2024-11-01 13:38:40 +01:00
zwy991114
f878c551c7
Merge branch 'master' into check-table-structure-completely-setting 2024-10-25 12:02:51 +08:00
Vladimir Cherkasov
eab454f3b9
upd aspell-dict.txt 2024-10-21 15:13:58 +02:00
Vladimir Cherkasov
aa58fde1d7
Cosmetic changes, upd setting name 2024-10-21 12:55:53 +02:00
zhangwanyun1
33426ac58a retry ci 2024-10-20 00:54:29 +08:00
zhangwanyun1
ac2140d3af fix the location of the new setting 2024-10-19 20:12:16 +08:00
zhangwanyun1
66de8310c3 fix ci 2024-10-18 01:34:14 +08:00
Vladimir Cherkasov
c0fa7bca61
Merge branch 'master' into check-table-structure-completely-setting 2024-10-17 15:04:28 +02:00
zhangwanyun1
d8230416d1 clarify the documentation 2024-10-16 11:29:06 +08:00
zhangwanyun1
6065de3073 Add setting check_table_structure_completely 2024-10-13 17:52:35 +08:00
Dmitry Novik
9b148f74d4 Merge remote-tracking branch 'origin/master' into analyzer-constants-refactoring 2024-09-19 18:02:48 +02:00
Dmitry Novik
e2a1ed9621 Fix crash 2024-05-02 18:42:47 +02:00
Dmitry Novik
ef4775c395 Store only ColumnConst in ConstantValue 2024-05-02 18:16:55 +02:00
Dmitry Novik
054c5e3c80 Analyzer: Refactor ConstantNode 2024-04-30 18:26:37 +02:00
60 changed files with 967 additions and 245 deletions

View File

@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC) or LOGICAL_ERROR
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)

View File

@ -13,7 +13,7 @@ execute_process(COMMAND ${CMAKE_CXX_COMPILER} --version
message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")
# Require minimum compiler versions
set (CLANG_MINIMUM_VERSION 17)
set (CLANG_MINIMUM_VERSION 18)
set (XCODE_MINIMUM_VERSION 12.0)
set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)

2
contrib/SimSIMD vendored

@ -1 +1 @@
Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3
Subproject commit da2d38537299ade247c2499131d936fb8db38f03

View File

@ -1,5 +1,5 @@
# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86
if (ARCH_AMD64)
# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86 and ARM
if (ARCH_AMD64 OR (ARCH_AARCH64 AND NOT NO_ARMV81_OR_HIGHER AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19))
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
set(SIMSIMD_SRCS ${SIMSIMD_PROJECT_DIR}/c/lib.c)
add_library(_simsimd ${SIMSIMD_SRCS})

2
contrib/usearch vendored

@ -1 +1 @@
Subproject commit 7efe8b710c9831bfe06573b1df0fad001b04a2b5
Subproject commit 9561fcae1249ea8effbf71250e8a7a7ea97e5dfe

View File

@ -6,9 +6,8 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/incl
target_link_libraries(_usearch INTERFACE _fp16)
target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
# Only x86 for now. On ARM, the linker goes down in flames. To make SimSIMD compile, I had to remove a macro checks in SimSIMD
# for AVX512 (x86, worked nicely) and __ARM_BF16_FORMAT_ALTERNATIVE. It is probably because of that.
if (ARCH_AMD64)
# SimSIMD supports x86 and ARM platforms. The latter requires Clang 19 because older versions had a buggy bf16 implementation.
if (ARCH_AMD64 OR (ARCH_AARCH64 AND NOT NO_ARMV81_OR_HIGHER AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19))
target_link_libraries(_usearch INTERFACE _simsimd)
target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
@ -17,52 +16,3 @@ if (ARCH_AMD64)
endif ()
add_library(ch_contrib::usearch ALIAS _usearch)
# Cf. https://github.com/llvm/llvm-project/issues/107810 (though it is not 100% the same stack)
#
# LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1
# 0x7996e7a1ae10: i64 = Register %4
# 0x7996e7a1b5f0: i64 = Constant<-16>
# 0x7996e7a1a730: i64 = undef
# In function: _ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd
# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
# Stack dump:
# 0. Running pass 'Function Pass Manager' on module 'src/libdbms.a(MergeTreeIndexVectorSimilarity.cpp.o at 2312737440)'.
# 1. Running pass 'AArch64 Instruction Selection' on function '@_ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd'
# #0 0x00007999e83a63bf llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda63bf)
# #1 0x00007999e83a44f9 llvm::sys::RunSignalHandlers() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda44f9)
# #2 0x00007999e83a6b00 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda6b00)
# #3 0x00007999e6e45320 (/lib/x86_64-linux-gnu/libc.so.6+0x45320)
# #4 0x00007999e6e9eb1c pthread_kill (/lib/x86_64-linux-gnu/libc.so.6+0x9eb1c)
# #5 0x00007999e6e4526e raise (/lib/x86_64-linux-gnu/libc.so.6+0x4526e)
# #6 0x00007999e6e288ff abort (/lib/x86_64-linux-gnu/libc.so.6+0x288ff)
# #7 0x00007999e82fe0c2 llvm::report_fatal_error(llvm::Twine const&, bool) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcfe0c2)
# #8 0x00007999e8c2f8e3 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162f8e3)
# #9 0x00007999e8c2ed76 llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162ed76)
# #10 0x00007999ea1adbcb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x2badbcb)
# #11 0x00007999e8c2611f llvm::SelectionDAGISel::DoInstructionSelection() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162611f)
# #12 0x00007999e8c25790 llvm::SelectionDAGISel::CodeGenAndEmitDAG() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1625790)
# #13 0x00007999e8c248de llvm::SelectionDAGISel::SelectAllBasicBlocks(llvm::Function const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x16248de)
# #14 0x00007999e8c22934 llvm::SelectionDAGISel::runOnMachineFunction(llvm::MachineFunction&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1622934)
# #15 0x00007999e87826b9 llvm::MachineFunctionPass::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x11826b9)
# #16 0x00007999e84f7772 llvm::FPPassManager::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7772)
# #17 0x00007999e84fd2f4 llvm::FPPassManager::runOnModule(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xefd2f4)
# #18 0x00007999e84f7e9f llvm::legacy::PassManagerImpl::run(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7e9f)
# #19 0x00007999e99f7d61 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f7d61)
# #20 0x00007999e99f8c91 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8c91)
# #21 0x00007999e99f8b10 llvm::lto::thinBackend(llvm::lto::Config const&, unsigned int, std::function<llvm::Expected<std::unique_ptr<llvm::CachedFileStream, std::default_delete<llvm::CachedFileStream>>> (unsigned int, llvm::Twine const&)>, llvm::Module&, llvm::ModuleSummaryIndex const&, llvm::DenseMap<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>, llvm::DenseMapInfo<llvm::StringRef, void
# >, llvm::detail::DenseMapPair<llvm::StringRef, std::unordered_set<unsigned long, std::hash<unsigned long>, std::equal_to<unsigned long>, std::allocator<unsigned long>>>> const&, llvm::DenseMap<unsigned long, llvm::GlobalValueSummary*, llvm::DenseMapInfo<unsigned long, void>, llvm::detail::DenseMapPair<unsigned long, llvm::GlobalValueSummary*>> const&, llvm::MapVector<llvm::StringRef, llvm::BitcodeModule, llvm::DenseMap<llvm::StringRef, unsigned int, llvm::DenseMapInfo<llvm::S
# tringRef, void>, llvm::detail::DenseMapPair<llvm::StringRef, unsigned int>>, llvm::SmallVector<std::pair<llvm::StringRef, llvm::BitcodeModule>, 0u>>*, std::vector<unsigned char, std::allocator<unsigned char>> const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8b10)
# #22 0x00007999e99f248d (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f248d)
# #23 0x00007999e99f1cd6 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f1cd6)
# #24 0x00007999e82c9beb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcc9beb)
# #25 0x00007999e834ebe3 llvm::ThreadPool::processTasks(llvm::ThreadPoolTaskGroup*) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4ebe3)
# #26 0x00007999e834f704 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4f704)
# #27 0x00007999e6e9ca94 (/lib/x86_64-linux-gnu/libc.so.6+0x9ca94)
# #28 0x00007999e6f29c3c (/lib/x86_64-linux-gnu/libc.so.6+0x129c3c)
# clang++-18: error: unable to execute command: Aborted (core dumped)
# clang++-18: error: linker command failed due to signal (use -v to see invocation)
# ^[[A^Cninja: build stopped: interrupted by user.

View File

@ -131,8 +131,8 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
## REPLACE PARTITION
@ -151,8 +151,8 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
## MOVE PARTITION TO TABLE
@ -166,9 +166,9 @@ For the query to run successfully, the following conditions must be met:
- Both tables must have the same structure.
- Both tables must have the same partition key, the same order by key and the same primary key.
- Both tables must have the same indices and projections.
- Both tables must have the same storage policy.
- Both tables must be the same engine family (replicated or non-replicated).
- The destination table must include all indices and projections from the source table. If the `enforce_index_structure_match_on_partition_manipulation` setting is enabled in the destination table, the indices and projections must be identical. Otherwise, the destination table can have a superset of the source table's indices and projections.
## CLEAR COLUMN IN PARTITION

View File

@ -136,7 +136,7 @@ ClickHouse применяет настройку в тех случаях, ко
- 0 — выключена.
- 1 — включена.
Значение по умолчанию: 1.
Значение по умолчанию: 0.
## http_zlib_compression_level {#settings-http_zlib_compression_level}

View File

@ -97,7 +97,7 @@ ClickHouse从表的过时副本中选择最相关的副本。
- 0 — Disabled.
- 1 — Enabled.
默认值:1
默认值:0
## http_zlib_compression_level {#settings-http_zlib_compression_level}

View File

@ -1,7 +1,9 @@
#include "DataTypes/IDataType.h"
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Columns/ColumnNullable.h>
#include <Common/assert_cast.h>
#include <Common/FieldVisitorToString.h>
#include <Common/SipHash.h>
@ -21,32 +23,44 @@
namespace DB
{
ConstantNode::ConstantNode(ConstantValuePtr constant_value_, QueryTreeNodePtr source_expression_)
ConstantNode::ConstantNode(ConstantValue constant_value_, QueryTreeNodePtr source_expression_)
: IQueryTreeNode(children_size)
, constant_value(std::move(constant_value_))
, value_string(applyVisitor(FieldVisitorToString(), constant_value->getValue()))
{
source_expression = std::move(source_expression_);
}
ConstantNode::ConstantNode(ConstantValuePtr constant_value_)
ConstantNode::ConstantNode(ConstantValue constant_value_)
: ConstantNode(constant_value_, nullptr /*source_expression*/)
{}
ConstantNode::ConstantNode(ColumnPtr constant_column_, DataTypePtr value_data_type_)
: ConstantNode(ConstantValue{std::move(constant_column_), value_data_type_})
{}
ConstantNode::ConstantNode(ColumnPtr constant_column_)
: ConstantNode(constant_column_, applyVisitor(FieldToDataType(), (*constant_column_)[0]))
{}
ConstantNode::ConstantNode(Field value_, DataTypePtr value_data_type_)
: ConstantNode(std::make_shared<ConstantValue>(convertFieldToTypeOrThrow(value_, *value_data_type_), value_data_type_))
: ConstantNode(ConstantValue{convertFieldToTypeOrThrow(value_, *value_data_type_), value_data_type_})
{}
ConstantNode::ConstantNode(Field value_)
: ConstantNode(value_, applyVisitor(FieldToDataType(), value_))
{}
String ConstantNode::getValueStringRepresentation() const
{
return applyVisitor(FieldVisitorToString(), getValue());
}
bool ConstantNode::requiresCastCall() const
{
const auto & constant_value_literal = constant_value->getValue();
const auto & constant_value_literal = getValue();
bool need_to_add_cast_function = false;
auto constant_value_literal_type = constant_value_literal.getType();
WhichDataType constant_value_type(constant_value->getType());
WhichDataType constant_value_type(constant_value.getType());
switch (constant_value_literal_type)
{
@ -116,9 +130,9 @@ void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state
if (mask_id)
buffer << "[HIDDEN id: " << mask_id << "]";
else
buffer << constant_value->getValue().dump();
buffer << getValue().dump();
buffer << ", constant_value_type: " << constant_value->getType()->getName();
buffer << ", constant_value_type: " << constant_value.getType()->getName();
if (!mask_id && getSourceExpression())
{
@ -129,30 +143,39 @@ void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state
void ConstantNode::convertToNullable()
{
constant_value = std::make_shared<ConstantValue>(constant_value->getValue(), makeNullableSafe(constant_value->getType()));
constant_value = { makeNullableSafe(constant_value.getColumn()), makeNullableSafe(constant_value.getType()) };
}
bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions compare_options) const
{
const auto & rhs_typed = assert_cast<const ConstantNode &>(rhs);
if (value_string != rhs_typed.value_string || constant_value->getValue() != rhs_typed.constant_value->getValue())
const auto & value_type = constant_value.getType();
const auto & rhs_value_type = rhs_typed.constant_value.getType();
if ((isArray(value_type) || isTuple(value_type) || isMap(value_type) ||
isArray(rhs_value_type) || isTuple(rhs_value_type) || isMap(rhs_value_type))
&& !value_type->equals(*rhs_value_type)
)
return false;
return !compare_options.compare_types || constant_value->getType()->equals(*rhs_typed.constant_value->getType());
const auto & column = constant_value.getColumn();
const auto & rhs_column = rhs_typed.constant_value.getColumn();
if (column->getDataType() != rhs_column->getDataType() || column->compareAt(0, 0, *rhs_column, 1) != 0)
return false;
return !compare_options.compare_types || constant_value.getType()->equals(*rhs_typed.constant_value.getType());
}
void ConstantNode::updateTreeHashImpl(HashState & hash_state, CompareOptions compare_options) const
{
constant_value.getColumn()->updateHashFast(hash_state);
if (compare_options.compare_types)
{
auto type_name = constant_value->getType()->getName();
auto type_name = constant_value.getType()->getName();
hash_state.update(type_name.size());
hash_state.update(type_name);
}
hash_state.update(value_string.size());
hash_state.update(value_string);
}
QueryTreeNodePtr ConstantNode::cloneImpl() const
@ -162,8 +185,8 @@ QueryTreeNodePtr ConstantNode::cloneImpl() const
ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
{
const auto & constant_value_literal = constant_value->getValue();
const auto & constant_value_type = constant_value->getType();
const auto constant_value_literal = getValue();
const auto & constant_value_type = constant_value.getType();
auto constant_value_ast = std::make_shared<ASTLiteral>(constant_value_literal);
if (!options.add_cast_for_constants)

View File

@ -4,7 +4,9 @@
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/ConstantValue.h>
#include <Columns/IColumn.h>
#include <DataTypes/DataTypeNullable.h>
#include <Interpreters/convertFieldToType.h>
namespace DB
{
@ -22,10 +24,19 @@ class ConstantNode final : public IQueryTreeNode
{
public:
/// Construct constant query tree node from constant value and source expression
explicit ConstantNode(ConstantValuePtr constant_value_, QueryTreeNodePtr source_expression);
explicit ConstantNode(ConstantValue constant_value_, QueryTreeNodePtr source_expression);
/// Construct constant query tree node from constant value
explicit ConstantNode(ConstantValuePtr constant_value_);
explicit ConstantNode(ConstantValue constant_value_);
/** Construct constant query tree node from column and data type.
*
* Throws exception if value cannot be converted to value data type.
*/
explicit ConstantNode(ColumnPtr constant_column_, DataTypePtr value_data_type_);
/// Construct constant query tree node from column, data type will be derived from field value
explicit ConstantNode(ColumnPtr constant_column_);
/** Construct constant query tree node from field and data type.
*
@ -37,16 +48,21 @@ public:
explicit ConstantNode(Field value_);
/// Get constant value
const Field & getValue() const
const ColumnPtr & getColumn() const
{
return constant_value->getValue();
return constant_value.getColumn();
}
/// Get constant value
Field getValue() const
{
Field out;
constant_value.getColumn()->get(0, out);
return out;
}
/// Get constant value string representation
const String & getValueStringRepresentation() const
{
return value_string;
}
String getValueStringRepresentation() const;
/// Returns true if constant node has source expression, false otherwise
bool hasSourceExpression() const
@ -73,7 +89,7 @@ public:
DataTypePtr getResultType() const override
{
return constant_value->getType();
return constant_value.getType();
}
/// Check if conversion to AST requires wrapping with _CAST function.
@ -101,8 +117,7 @@ protected:
ASTPtr toASTImpl(const ConvertToASTOptions & options) const override;
private:
ConstantValuePtr constant_value;
String value_string;
ConstantValue constant_value;
QueryTreeNodePtr source_expression;
size_t mask_id = 0;

View File

@ -1,28 +1,29 @@
#pragma once
#include <Columns/ColumnConst.h>
#include <Columns/IColumn.h>
#include <Core/Field.h>
#include <DataTypes/IDataType.h>
namespace DB
{
/** Immutable constant value representation during analysis stage.
* Some query nodes can be represented by constant (scalar subqueries, functions with constant arguments).
*/
class ConstantValue;
using ConstantValuePtr = std::shared_ptr<ConstantValue>;
class ConstantValue
{
public:
ConstantValue(Field value_, DataTypePtr data_type_)
: value(std::move(value_))
ConstantValue(ColumnPtr column_, DataTypePtr data_type_)
: column(wrapToColumnConst(column_))
, data_type(std::move(data_type_))
{}
const Field & getValue() const
ConstantValue(const Field & field_, DataTypePtr data_type_)
: column(data_type_->createColumnConst(1, field_))
, data_type(std::move(data_type_))
{}
const ColumnPtr & getColumn() const
{
return value;
return column;
}
const DataTypePtr & getType() const
@ -30,7 +31,15 @@ public:
return data_type;
}
private:
Field value;
static ColumnPtr wrapToColumnConst(ColumnPtr column_)
{
if (!isColumnConst(*column_))
return ColumnConst::create(column_, 1);
return column_;
}
ColumnPtr column;
DataTypePtr data_type;
};

View File

@ -75,7 +75,7 @@ ColumnsWithTypeAndName FunctionNode::getArgumentColumns() const
argument_column.type = argument->getResultType();
if (constant && !isNotCreatable(argument_column.type))
argument_column.column = argument_column.type->createColumnConst(1, constant->getValue());
argument_column.column = constant->getColumn();
argument_columns.push_back(std::move(argument_column));
}

View File

@ -155,8 +155,7 @@ private:
if (function_arguments_nodes_size == 1)
{
auto comparison_argument_constant_value = std::make_shared<ConstantValue>(constant_tuple[0], tuple_data_type_elements[0]);
auto comparison_argument_constant_node = std::make_shared<ConstantNode>(std::move(comparison_argument_constant_value));
auto comparison_argument_constant_node = std::make_shared<ConstantNode>(constant_tuple[0], tuple_data_type_elements[0]);
return makeComparisonFunction(function_arguments_nodes[0], std::move(comparison_argument_constant_node), comparison_function_name);
}
@ -165,8 +164,7 @@ private:
for (size_t i = 0; i < function_arguments_nodes_size; ++i)
{
auto equals_argument_constant_value = std::make_shared<ConstantValue>(constant_tuple[i], tuple_data_type_elements[i]);
auto equals_argument_constant_node = std::make_shared<ConstantNode>(std::move(equals_argument_constant_value));
auto equals_argument_constant_node = std::make_shared<ConstantNode>(constant_tuple[i], tuple_data_type_elements[i]);
auto equals_function = makeEqualsFunction(function_arguments_nodes[i], std::move(equals_argument_constant_node));
tuple_arguments_equals_functions.push_back(std::move(equals_function));
}

View File

@ -518,8 +518,7 @@ private:
if (collapse_to_false)
{
auto false_value = std::make_shared<ConstantValue>(0u, function_node.getResultType());
auto false_node = std::make_shared<ConstantNode>(std::move(false_value));
auto false_node = std::make_shared<ConstantNode>(0u, function_node.getResultType());
node = std::move(false_node);
return;
}

View File

@ -343,11 +343,11 @@ static FunctionNodePtr wrapExpressionNodeInFunctionWithSecondConstantStringArgum
auto function_node = std::make_shared<FunctionNode>(std::move(function_name));
auto constant_node_type = std::make_shared<DataTypeString>();
auto constant_value = std::make_shared<ConstantValue>(std::move(second_argument), std::move(constant_node_type));
auto constant_value = ConstantValue{second_argument, std::move(constant_node_type)};
ColumnsWithTypeAndName argument_columns;
argument_columns.push_back({nullptr, expression->getResultType(), {}});
argument_columns.push_back({constant_value->getType()->createColumnConst(1, constant_value->getValue()), constant_value->getType(), {}});
argument_columns.push_back({constant_value.getColumn(), constant_value.getType(), {}});
auto function = FunctionFactory::instance().tryGet(function_node->getFunctionName(), context);
auto function_base = function->build(argument_columns);
@ -1308,7 +1308,7 @@ QueryTreeNodePtr IdentifierResolver::matchArrayJoinSubcolumns(
if (!second_argument || second_argument->getValue().getType() != Field::Types::String)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected constant string as second argument of getSubcolumn function {}", resolved_function->dumpTree());
const auto & resolved_subcolumn_path = second_argument->getValue().safeGet<String &>();
auto resolved_subcolumn_path = second_argument->getValue().safeGet<String>();
if (!startsWith(resolved_subcolumn_path, array_join_subcolumn_prefix))
return {};
@ -1352,7 +1352,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveExpressionFromArrayJoinExpression
size_t nested_function_arguments_size = nested_function_arguments.size();
const auto & nested_keys_names_constant_node = nested_function_arguments[0]->as<ConstantNode & >();
const auto & nested_keys_names = nested_keys_names_constant_node.getValue().safeGet<Array &>();
auto nested_keys_names = nested_keys_names_constant_node.getValue().safeGet<Array>();
size_t nested_keys_names_size = nested_keys_names.size();
if (nested_keys_names_size == nested_function_arguments_size - 1)

View File

@ -690,9 +690,6 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
const auto & scalar_column_with_type = scalar_block.safeGetByPosition(0);
const auto & scalar_type = scalar_column_with_type.type;
Field scalar_value;
scalar_column_with_type.column->get(0, scalar_value);
const auto * scalar_type_name = scalar_block.safeGetByPosition(0).type->getFamilyName();
static const std::set<std::string_view> useless_literal_types = {"Array", "Tuple", "AggregateFunction", "Function", "Set", "LowCardinality"};
auto * nearest_query_scope = scope.getNearestQueryScope();
@ -701,10 +698,10 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
if (!context->getSettingsRef()[Setting::enable_scalar_subquery_optimization] || !useless_literal_types.contains(scalar_type_name)
|| !context->hasQueryContext() || !nearest_query_scope)
{
auto constant_value = std::make_shared<ConstantValue>(std::move(scalar_value), scalar_type);
ConstantValue constant_value{ scalar_column_with_type.column, scalar_type };
auto constant_node = std::make_shared<ConstantNode>(constant_value, node);
if (constant_node->getValue().isNull())
if (scalar_column_with_type.column->isNullAt(0))
{
node = buildCastFunction(constant_node, constant_node->getResultType(), context);
node = std::make_shared<ConstantNode>(std::move(constant_value), node);
@ -727,8 +724,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
std::string get_scalar_function_name = "__getScalar";
auto scalar_query_hash_constant_value = std::make_shared<ConstantValue>(std::move(scalar_query_hash_string), std::make_shared<DataTypeString>());
auto scalar_query_hash_constant_node = std::make_shared<ConstantNode>(std::move(scalar_query_hash_constant_value));
auto scalar_query_hash_constant_node = std::make_shared<ConstantNode>(std::move(scalar_query_hash_string), std::make_shared<DataTypeString>());
auto get_scalar_function_node = std::make_shared<FunctionNode>(get_scalar_function_name);
get_scalar_function_node->getArguments().getNodes().push_back(std::move(scalar_query_hash_constant_node));
@ -870,8 +866,7 @@ void QueryAnalyzer::convertLimitOffsetExpression(QueryTreeNodePtr & expression_n
"{} numeric constant expression is not representable as UInt64",
expression_description);
auto constant_value = std::make_shared<ConstantValue>(std::move(converted_value), std::make_shared<DataTypeUInt64>());
auto result_constant_node = std::make_shared<ConstantNode>(std::move(constant_value));
auto result_constant_node = std::make_shared<ConstantNode>(std::move(converted_value), std::make_shared<DataTypeUInt64>());
result_constant_node->getSourceExpression() = limit_offset_constant_node->getSourceExpression();
expression_node = std::move(result_constant_node);
@ -3054,7 +3049,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
const auto * constant_node = function_argument->as<ConstantNode>();
if (constant_node)
{
argument_column.column = constant_node->getResultType()->createColumnConst(1, constant_node->getValue());
argument_column.column = constant_node->getColumn();
argument_column.type = constant_node->getResultType();
argument_is_constant = true;
}
@ -3458,7 +3453,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
if (first_argument_constant_node && second_argument_constant_node)
{
const auto & first_argument_constant_type = first_argument_constant_node->getResultType();
const auto & second_argument_constant_literal = second_argument_constant_node->getValue();
const auto second_argument_constant_literal = second_argument_constant_node->getValue();
const auto & second_argument_constant_type = second_argument_constant_node->getResultType();
const auto & settings = scope.context->getSettingsRef();
@ -3485,7 +3480,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
argument_columns[1].type = std::make_shared<DataTypeSet>();
}
std::shared_ptr<ConstantValue> constant_value;
ConstantNodePtr constant_node;
try
{
@ -3541,9 +3536,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
column->byteSize() < 1_MiB)
{
/// Replace function node with result constant node
Field column_constant_value;
column->get(0, column_constant_value);
constant_value = std::make_shared<ConstantValue>(std::move(column_constant_value), result_type);
constant_node = std::make_shared<ConstantNode>(ConstantValue{ std::move(column), std::move(result_type) }, node);
}
}
@ -3555,8 +3548,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
throw;
}
if (constant_value)
node = std::make_shared<ConstantNode>(std::move(constant_value), node);
if (constant_node)
node = std::move(constant_node);
return result_projection_names;
}

View File

@ -210,8 +210,7 @@ QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
bool resolve)
{
std::string cast_type = type->getName();
auto cast_type_constant_value = std::make_shared<ConstantValue>(std::move(cast_type), std::make_shared<DataTypeString>());
auto cast_type_constant_node = std::make_shared<ConstantNode>(std::move(cast_type_constant_value));
auto cast_type_constant_node = std::make_shared<ConstantNode>(std::move(cast_type), std::make_shared<DataTypeString>());
std::string cast_function_name = "_CAST";
auto cast_function_node = std::make_shared<FunctionNode>(cast_function_name);
@ -787,8 +786,7 @@ NameSet collectIdentifiersFullNames(const QueryTreeNodePtr & node)
QueryTreeNodePtr createCastFunction(QueryTreeNodePtr node, DataTypePtr result_type, ContextPtr context)
{
auto enum_literal = std::make_shared<ConstantValue>(result_type->getName(), std::make_shared<DataTypeString>());
auto enum_literal_node = std::make_shared<ConstantNode>(std::move(enum_literal));
auto enum_literal_node = std::make_shared<ConstantNode>(result_type->getName(), std::make_shared<DataTypeString>());
auto cast_function = FunctionFactory::instance().get("_CAST", std::move(context));
QueryTreeNodes arguments{ std::move(node), std::move(enum_literal_node) };

View File

@ -319,6 +319,8 @@ public:
variant_column_ptr = assert_cast<ColumnVariant *>(variant_column.get());
}
void forEachSubcolumn(ColumnCallback callback) const override { callback(variant_column); }
void forEachSubcolumnRecursively(RecursiveMutableColumnCallback callback) override
{
callback(*variant_column);

View File

@ -41,7 +41,7 @@ static struct InitFiu
REGULAR(use_delayed_remote_source) \
REGULAR(cluster_discovery_faults) \
REGULAR(replicated_sends_failpoint) \
REGULAR(stripe_log_sink_write_fallpoint)\
REGULAR(stripe_log_sink_write_fallpoint) \
ONCE(smt_commit_merge_mutate_zk_fail_after_op) \
ONCE(smt_commit_merge_mutate_zk_fail_before_op) \
ONCE(smt_commit_write_zk_fail_after_op) \
@ -77,6 +77,8 @@ static struct InitFiu
REGULAR(replicated_merge_tree_all_replicas_stale) \
REGULAR(zero_copy_lock_zk_fail_before_op) \
REGULAR(zero_copy_lock_zk_fail_after_op) \
REGULAR(plain_object_storage_write_fail_on_directory_create) \
REGULAR(plain_object_storage_write_fail_on_directory_move) \
namespace FailPoints

View File

@ -1800,7 +1800,7 @@ Possible values:
- 0 Disabled.
- 1 Enabled.
)", 1) \
)", 0) \
DECLARE(Int64, http_zlib_compression_level, 3, R"(
Sets the level of data compression in the response to an HTTP request if [enable_http_compression = 1](#enable_http_compression).
@ -5782,7 +5782,7 @@ Allow JSON data type
DECLARE(Bool, allow_experimental_codecs, false, R"(
If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).
)", EXPERIMENTAL) \
DECLARE(Bool, allow_experimental_shared_set_join, true, R"(
DECLARE(Bool, allow_experimental_shared_set_join, false, R"(
Only in ClickHouse Cloud. Allow to create ShareSet and SharedJoin
)", EXPERIMENTAL) \
DECLARE(UInt64, max_limit_for_ann_queries, 1'000'000, R"(

View File

@ -64,7 +64,6 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
},
{"24.11",
{
{"enable_http_compression", false, true, "Improvement for read-only clients since they can't change settings"},
{"validate_mutation_query", false, true, "New setting to validate mutation queries by default."},
{"enable_job_stack_trace", false, true, "Enable by default collecting stack traces from job's scheduling."},
{"allow_suspicious_types_in_group_by", true, false, "Don't allow Variant/Dynamic types in GROUP BY by default"},
@ -80,6 +79,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{"backup_restore_finish_timeout_after_error_sec", 0, 180, "New setting."},
{"query_plan_merge_filters", false, true, "Allow to merge filters in the query plan. This is required to properly support filter-push-down with a new analyzer."},
{"parallel_replicas_local_plan", false, true, "Use local plan for local replica in a query with parallel replicas"},
{"allow_experimental_shared_set_join", 1, 0, "Disable a setting for ClickHouse Cloud"},
{"merge_tree_use_v1_object_and_dynamic_serialization", true, false, "Add new serialization V2 version for JSON and Dynamic types"},
{"min_joined_block_size_bytes", 524288, 524288, "New setting."},
{"allow_experimental_bfloat16_type", false, false, "Add new experimental BFloat16 type"},
@ -605,6 +605,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
{
{"24.12",
{
{"enforce_index_structure_match_on_partition_manipulation", true, false, "Add new setting to allow attach when source table's projections and secondary indices is a subset of those in the target table."}
}
},
{"24.11",

View File

@ -57,6 +57,12 @@ struct InMemoryDirectoryPathMap
return it->second;
}
bool removePathIfExists(const std::filesystem::path & path)
{
std::lock_guard lock(mutex);
return map.erase(path) != 0;
}
mutable SharedMutex mutex;
#ifdef OS_LINUX

View File

@ -7,6 +7,7 @@
#include <IO/WriteHelpers.h>
#include <Poco/Timestamp.h>
#include <Common/Exception.h>
#include <Common/FailPoint.h>
#include <Common/SharedLockGuard.h>
#include <Common/logger_useful.h>
@ -18,8 +19,15 @@ namespace ErrorCodes
extern const int FILE_DOESNT_EXIST;
extern const int FILE_ALREADY_EXISTS;
extern const int INCORRECT_DATA;
extern const int FAULT_INJECTED;
};
namespace FailPoints
{
extern const char plain_object_storage_write_fail_on_directory_create[];
extern const char plain_object_storage_write_fail_on_directory_move[];
}
namespace
{
@ -72,8 +80,14 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std:
/* buf_size */ DBMS_DEFAULT_BUFFER_SIZE,
/* settings */ {});
write_created = true;
writeString(path.string(), *buf);
fiu_do_on(FailPoints::plain_object_storage_write_fail_on_directory_create, {
throw Exception(ErrorCodes::FAULT_INJECTED, "Injecting fault when creating '{}' directory", path);
});
buf->finalize();
auto event = object_storage->getMetadataStorageMetrics().directory_created;
ProfileEvents::increment(event);
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
@ -83,34 +97,20 @@ void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::execute(std:
}
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::add(metric, 1);
writeString(path.string(), *buf);
buf->finalize();
write_finalized = true;
auto event = object_storage->getMetadataStorageMetrics().directory_created;
ProfileEvents::increment(event);
}
void MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
{
auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix);
if (write_finalized)
LOG_TRACE(getLogger("MetadataStorageFromPlainObjectStorageCreateDirectoryOperation"), "Undoing '{}' directory creation", path);
const auto base_path = path.parent_path();
if (path_map.removePathIfExists(base_path))
{
const auto base_path = path.parent_path();
{
std::lock_guard lock(path_map.mutex);
path_map.map.erase(base_path);
}
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::sub(metric, 1);
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
}
else if (write_created)
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
auto metadata_object_key = createMetadataObjectKey(object_key_prefix, metadata_key_prefix);
object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME));
}
MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::MetadataStorageFromPlainObjectStorageMoveDirectoryOperation(
@ -184,8 +184,10 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::execute(std::u
getLogger("MetadataStorageFromPlainObjectStorageMoveDirectoryOperation"), "Moving directory '{}' to '{}'", path_from, path_to);
auto write_buf = createWriteBuf(path_from, path_to, /* validate_content */ true);
write_created = true;
writeString(path_to.string(), *write_buf);
fiu_do_on(FailPoints::plain_object_storage_write_fail_on_directory_move, {
throw Exception(ErrorCodes::FAULT_INJECTED, "Injecting fault when moving from '{}' to '{}'", path_from, path_to);
});
write_buf->finalize();
/// parent_path() removes the trailing '/'.
@ -207,13 +209,12 @@ void MetadataStorageFromPlainObjectStorageMoveDirectoryOperation::undo(std::uniq
{
if (write_finalized)
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped());
}
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
map.emplace(path_from.parent_path(), map.extract(path_to.parent_path()).mapped());
}
if (write_created)
{
auto write_buf = createWriteBuf(path_to, path_from, /* verify_content */ false);
writeString(path_from.string(), *write_buf);
write_buf->finalize();
@ -249,26 +250,31 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std:
auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME);
object_storage->removeObjectIfExists(metadata_object);
if (path_map.removePathIfExists(base_path))
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
map.erase(base_path);
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::sub(metric, 1);
auto event = object_storage->getMetadataStorageMetrics().directory_removed;
ProfileEvents::increment(event);
}
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::sub(metric, 1);
removed = true;
auto event = object_storage->getMetadataStorageMetrics().directory_removed;
ProfileEvents::increment(event);
remove_attempted = true;
}
void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::unique_lock<SharedMutex> &)
{
if (!removed)
if (!remove_attempted)
return;
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
map.emplace(path.parent_path(), key_prefix);
}
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::add(metric, 1);
auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix);
auto metadata_object = StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME);
auto buf = object_storage->writeObject(
@ -279,14 +285,6 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::undo(std::un
/* settings */ {});
writeString(path.string(), *buf);
buf->finalize();
{
std::lock_guard lock(path_map.mutex);
auto & map = path_map.map;
map.emplace(path.parent_path(), std::move(key_prefix));
}
auto metric = object_storage->getMetadataStorageMetrics().directory_map_size;
CurrentMetrics::add(metric, 1);
}
MetadataStorageFromPlainObjectStorageWriteFileOperation::MetadataStorageFromPlainObjectStorageWriteFileOperation(

View File

@ -19,9 +19,6 @@ private:
const std::string metadata_key_prefix;
const std::string object_key_prefix;
bool write_created = false;
bool write_finalized = false;
public:
MetadataStorageFromPlainObjectStorageCreateDirectoryOperation(
/// path_ must end with a trailing '/'.
@ -43,7 +40,6 @@ private:
ObjectStoragePtr object_storage;
const std::string metadata_key_prefix;
bool write_created = false;
bool write_finalized = false;
std::unique_ptr<WriteBufferFromFileBase>
@ -73,7 +69,7 @@ private:
const std::string metadata_key_prefix;
std::string key_prefix;
bool removed = false;
bool remove_attempted = false;
public:
MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation(

View File

@ -26,14 +26,6 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast)
{
chassert(ast);
if (const auto * function = ast->template as<ASTFunction>())
{
std::unordered_set<std::string> udf_in_replace_process;
auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process);
if (replace_result)
ast = replace_result;
}
for (auto & child : ast->children)
{
if (!child)
@ -48,6 +40,14 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast)
if (new_ptr != old_ptr)
ast->updatePointerToChild(old_ptr, new_ptr);
}
if (const auto * function = ast->template as<ASTFunction>())
{
std::unordered_set<std::string> udf_in_replace_process;
auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process);
if (replace_result)
ast = replace_result;
}
}
void UserDefinedSQLFunctionVisitor::visit(IAST * ast)

View File

@ -32,9 +32,8 @@ public:
// Format message with fmt::format, like the logging functions.
template <typename... Args>
S3Exception(Aws::S3::S3Errors code_, fmt::format_string<Args...> fmt, Args &&... args)
: Exception(fmt::format(fmt, std::forward<Args>(args)...), ErrorCodes::S3_ERROR)
, code(code_)
S3Exception(Aws::S3::S3Errors code_, FormatStringHelper<Args...> fmt, Args &&... args)
: Exception(PreformattedMessage{fmt.format(std::forward<Args>(args)...)}, ErrorCodes::S3_ERROR), code(code_)
{
}

View File

@ -1451,7 +1451,7 @@ String fourSpaceIndent(size_t indent);
bool inline isWritingToTerminal(const WriteBuffer & buf)
{
const auto * write_buffer_to_descriptor = typeid_cast<const WriteBufferFromFileDescriptor *>(&buf);
const auto * write_buffer_to_descriptor = dynamic_cast<const WriteBufferFromFileDescriptor *>(&buf);
return write_buffer_to_descriptor && write_buffer_to_descriptor->getFD() == STDOUT_FILENO && isatty(STDOUT_FILENO);
}

View File

@ -90,26 +90,26 @@ std::string functionName(const ASTPtr & node)
return node->as<ASTFunction &>().name;
}
const Field * tryGetConstantValue(const QueryTreeNodePtr & node)
std::optional<Field> tryGetConstantValue(const QueryTreeNodePtr & node)
{
if (const auto * constant = node->as<ConstantNode>())
return &constant->getValue();
return constant->getValue();
return nullptr;
return {};
}
const Field * tryGetConstantValue(const ASTPtr & node)
std::optional<Field> tryGetConstantValue(const ASTPtr & node)
{
if (const auto * constant = node->as<ASTLiteral>())
return &constant->value;
return constant->value;
return nullptr;
return {};
}
template <typename Node>
const Field & getConstantValue(const Node & node)
Field getConstantValue(const Node & node)
{
const auto * constant = tryGetConstantValue(node);
const auto constant = tryGetConstantValue(node);
assert(constant);
return *constant;
}
@ -514,7 +514,7 @@ void ComparisonGraph<Node>::EqualComponent::buildConstants()
constant_index.reset();
for (size_t i = 0; i < nodes.size(); ++i)
{
if (tryGetConstantValue(nodes[i]) != nullptr)
if (tryGetConstantValue(nodes[i]))
{
constant_index = i;
return;
@ -562,7 +562,7 @@ std::optional<Node> ComparisonGraph<Node>::getEqualConst(const Node & node) cons
template <ComparisonGraphNodeType Node>
std::optional<std::pair<Field, bool>> ComparisonGraph<Node>::getConstUpperBound(const Node & node) const
{
if (const auto * constant = tryGetConstantValue(node))
if (const auto constant = tryGetConstantValue(node))
return std::make_pair(*constant, false);
const auto it = graph.node_hash_to_component.find(Graph::getHash(node));
@ -580,7 +580,7 @@ std::optional<std::pair<Field, bool>> ComparisonGraph<Node>::getConstUpperBound(
template <ComparisonGraphNodeType Node>
std::optional<std::pair<Field, bool>> ComparisonGraph<Node>::getConstLowerBound(const Node & node) const
{
if (const auto * constant = tryGetConstantValue(node))
if (const auto constant = tryGetConstantValue(node))
return std::make_pair(*constant, false);
const auto it = graph.node_hash_to_component.find(Graph::getHash(node));

View File

@ -70,7 +70,7 @@ struct JoinedElement
join->strictness = JoinStrictness::All;
join->on_expression = on_expression;
join->children.push_back(join->on_expression);
join->children = {join->on_expression};
return true;
}

View File

@ -13,6 +13,7 @@
#include <Core/Settings.h>
#include <numeric>
#include <shared_mutex>
#include <fmt/format.h>
@ -524,6 +525,7 @@ public:
Block nextImpl() override
{
ExtraBlockPtr not_processed = nullptr;
std::shared_lock shared(eof_mutex);
{
std::lock_guard lock(extra_block_mutex);
@ -557,7 +559,24 @@ public:
block = left_reader.read();
if (!block)
{
return {};
shared.unlock();
bool there_are_still_might_be_rows_to_process = false;
{
/// The following race condition could happen without this mutex:
/// * we're called from `IBlocksStream::next()`
/// * another thread just read the last block from `left_reader` and now is in the process of or about to call `joinBlock()`
/// * it might be that `joinBlock()` will leave some rows in the `not_processed`
/// * but if the current thread will return now an empty block `finished` will be set to true in `IBlocksStream::next()` and
/// these not processed rows will be lost
/// So we shouldn't finish execution while there is at least one in-flight `joinBlock()` call. Let's wait until we're alone
/// and double check if there are any not processed rows left.
std::unique_lock exclusive(eof_mutex);
std::lock_guard lock(extra_block_mutex);
if (!not_processed_blocks.empty())
there_are_still_might_be_rows_to_process = true;
}
return there_are_still_might_be_rows_to_process ? nextImpl() : Block();
}
// block comes from left_reader, need to join with right table to get the result.
@ -592,7 +611,7 @@ public:
return block;
}
size_t current_bucket;
const size_t current_bucket;
Buckets buckets;
InMemoryJoinPtr hash_join;
@ -603,6 +622,8 @@ public:
std::mutex extra_block_mutex;
std::list<ExtraBlockPtr> not_processed_blocks TSA_GUARDED_BY(extra_block_mutex);
std::shared_mutex eof_mutex;
};
IBlocksStreamPtr GraceHashJoin::getDelayedBlocks()

View File

@ -818,18 +818,18 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
{
IndexDescription index_desc = IndexDescription::getIndexFromAST(index->clone(), properties.columns, getContext());
if (properties.indices.has(index_desc.name))
throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use different index names.", backQuoteIfNeed(index_desc.name));
throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use a different index name", backQuoteIfNeed(index_desc.name));
const auto & settings = getContext()->getSettingsRef();
if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings[Setting::allow_experimental_full_text_index])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is disabled. Turn on setting 'allow_experimental_full_text_index'");
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "The experimental full-text index feature is disabled. Enable the setting 'allow_experimental_full_text_index' to use it");
/// ----
/// Temporary check during a transition period. Please remove at the end of 2024.
if (index_desc.type == INVERTED_INDEX_NAME && !settings[Setting::allow_experimental_inverted_index])
throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'");
throw Exception(ErrorCodes::ILLEGAL_INDEX, "The 'inverted' index type is deprecated. Please use the 'full_text' index type instead");
/// ----
if (index_desc.type == "vector_similarity" && !settings[Setting::allow_experimental_vector_similarity_index])
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental vector similarity index is disabled. Turn on setting 'allow_experimental_vector_similarity_index'");
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "The experimental vector similarity index feature is disabled. Enable the setting 'allow_experimental_vector_similarity_index' to use it");
properties.indices.push_back(index_desc);
}

View File

@ -168,7 +168,7 @@ public:
{
if (isTuple(constant->getResultType()))
{
const auto & tuple = constant->getValue().safeGet<Tuple &>();
const auto tuple = constant->getValue().safeGet<Tuple>();
Tuple new_tuple;
new_tuple.reserve(tuple.size());

View File

@ -130,12 +130,25 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo
void ASTColumnDeclaration::forEachPointerToChild(std::function<void(void **)> f)
{
f(reinterpret_cast<void **>(&default_expression));
f(reinterpret_cast<void **>(&comment));
f(reinterpret_cast<void **>(&codec));
f(reinterpret_cast<void **>(&statistics_desc));
f(reinterpret_cast<void **>(&ttl));
f(reinterpret_cast<void **>(&collation));
f(reinterpret_cast<void **>(&settings));
auto visit_child = [&f](ASTPtr & member)
{
IAST * new_member_ptr = member.get();
f(reinterpret_cast<void **>(&new_member_ptr));
if (new_member_ptr != member.get())
{
if (new_member_ptr)
member = new_member_ptr->ptr();
else
member.reset();
}
};
visit_child(default_expression);
visit_child(comment);
visit_child(codec);
visit_child(statistics_desc);
visit_child(ttl);
visit_child(collation);
visit_child(settings);
}
}

View File

@ -61,6 +61,29 @@ ASTPtr ASTTableJoin::clone() const
return res;
}
void ASTTableJoin::forEachPointerToChild(std::function<void(void **)> f)
{
IAST * new_using_expression_list = using_expression_list.get();
f(reinterpret_cast<void **>(&new_using_expression_list));
if (new_using_expression_list != using_expression_list.get())
{
if (new_using_expression_list)
using_expression_list = new_using_expression_list->ptr();
else
using_expression_list.reset();
}
IAST * new_on_expression = on_expression.get();
f(reinterpret_cast<void **>(&new_on_expression));
if (new_on_expression != on_expression.get())
{
if (new_on_expression)
on_expression = new_on_expression->ptr();
else
on_expression.reset();
}
}
void ASTArrayJoin::updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const
{
hash_state.update(kind);

View File

@ -80,6 +80,9 @@ struct ASTTableJoin : public IAST
void formatImplAfterTable(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const;
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
void updateTreeHashImpl(SipHash & hash_state, bool ignore_aliases) const override;
protected:
void forEachPointerToChild(std::function<void(void **)> f) override;
};
/// Specification of ARRAY JOIN.

View File

@ -233,6 +233,7 @@ namespace MergeTreeSetting
extern const MergeTreeSettingsString storage_policy;
extern const MergeTreeSettingsFloat zero_copy_concurrent_part_removal_max_postpone_ratio;
extern const MergeTreeSettingsUInt64 zero_copy_concurrent_part_removal_max_split_times;
extern const MergeTreeSettingsBool enforce_index_structure_match_on_partition_manipulation;
extern const MergeTreeSettingsBool prewarm_mark_cache;
}
@ -7533,10 +7534,11 @@ MergeTreeData & MergeTreeData::checkStructureAndGetMergeTreeData(IStorage & sour
if (query_to_string(my_snapshot->getPrimaryKeyAST()) != query_to_string(src_snapshot->getPrimaryKeyAST()))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Tables have different primary key");
const auto check_definitions = [](const auto & my_descriptions, const auto & src_descriptions)
const auto check_definitions = [this](const auto & my_descriptions, const auto & src_descriptions)
{
if (my_descriptions.size() != src_descriptions.size())
bool strict_match = (*getSettings())[MergeTreeSetting::enforce_index_structure_match_on_partition_manipulation];
if ((my_descriptions.size() < src_descriptions.size()) ||
(strict_match && my_descriptions.size() != src_descriptions.size()))
return false;
std::unordered_set<std::string> my_query_strings;

View File

@ -100,6 +100,7 @@ namespace ErrorCodes
DECLARE(String, merge_workload, "", "Name of workload to be used to access resources for merges", 0) \
DECLARE(String, mutation_workload, "", "Name of workload to be used to access resources for mutations", 0) \
DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \
DECLARE(Bool, enforce_index_structure_match_on_partition_manipulation, false, "If this setting is enabled for destination table of a partition manipulation query (`ATTACH/MOVE/REPLACE PARTITION`), the indices and projections must be identical between the source and destination tables. Otherwise, the destination table can have a superset of the source table's indices and projections.", 0) \
DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \
DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \
DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \

View File

@ -299,8 +299,9 @@ TEST(TransformQueryForExternalDatabase, Issue7245)
const State & state = State::instance();
check(state, 1, {"apply_id", "apply_type", "apply_status", "create_time"},
"SELECT apply_id FROM test.table WHERE apply_type = 2 AND create_time > addDays(toDateTime('2019-01-01 01:02:03'),-7) AND apply_status IN (3,4)",
R"(SELECT "apply_id", "apply_type", "apply_status", "create_time" FROM "test"."table" WHERE ("apply_type" = 2) AND ("create_time" > '2018-12-25 01:02:03') AND ("apply_status" IN (3, 4)))");
"SELECT apply_id FROM test.table WHERE apply_type = 2 AND create_time > addDays(toDateTime('2019-01-01 01:02:03', 'UTC'),-7) AND apply_status IN (3,4)",
R"(SELECT "apply_id", "apply_type", "apply_status", "create_time" FROM "test"."table" WHERE ("apply_type" = 2) AND ("create_time" > '2018-12-25 01:02:03') AND ("apply_status" IN (3, 4)))",
R"(SELECT "apply_id", "apply_type", "apply_status", "create_time" FROM "test"."table" WHERE ("apply_type" = 2) AND ("create_time" > 1545699723) AND ("apply_status" IN (3, 4)))");
}
TEST(TransformQueryForExternalDatabase, Aliases)
@ -393,8 +394,9 @@ TEST(TransformQueryForExternalDatabase, ToDate)
const State & state = State::instance();
check(state, 1, {"a", "b", "foo"},
"SELECT foo FROM table WHERE a=10 AND b=toDate('2019-10-05')",
R"(SELECT "a", "b", "foo" FROM "test"."table" WHERE ("a" = 10) AND ("b" = '2019-10-05'))");
"SELECT foo FROM table WHERE a=10 AND b=toDate('2019-10-05', 'UTC')",
R"(SELECT "a", "b", "foo" FROM "test"."table" WHERE ("a" = 10) AND ("b" = '2019-10-05'))",
R"(SELECT "a", "b", "foo" FROM "test"."table" WHERE ("a" = 10) AND ("b" = 18174))");
}
TEST(TransformQueryForExternalDatabase, Analyzer)
@ -419,7 +421,8 @@ TEST(TransformQueryForExternalDatabase, Analyzer)
check(state, 1, {"is_value"},
"SELECT is_value FROM table WHERE is_value = true",
R"(SELECT "is_value" FROM "test"."table" WHERE "is_value" = true)");
R"(SELECT "is_value" FROM "test"."table" WHERE "is_value" = true)",
R"(SELECT "is_value" FROM "test"."table" WHERE "is_value" = 1)");
check(state, 1, {"is_value"},
"SELECT is_value FROM table WHERE is_value = 1",

View File

@ -49,7 +49,7 @@ public:
WriteBufferFromOwnString out;
result_type->getDefaultSerialization()->serializeText(inner_column, 0, out, FormatSettings());
node = std::make_shared<ConstantNode>(std::make_shared<ConstantValue>(out.str(), result_type));
node = std::make_shared<ConstantNode>(out.str(), std::move(result_type));
}
}
}

View File

@ -56,7 +56,9 @@ LABEL_CATEGORIES = {
"Bug Fix (user-visible misbehaviour in official stable or prestable release)",
"Bug Fix (user-visible misbehavior in official stable or prestable release)",
],
"pr-critical-bugfix": ["Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)"],
"pr-critical-bugfix": [
"Critical Bug Fix (crash, data loss, RBAC) or LOGICAL_ERROR"
],
"pr-build": [
"Build/Testing/Packaging Improvement",
"Build Improvement",

View File

@ -0,0 +1,5 @@
<clickhouse>
<merge_tree>
<enforce_index_structure_match_on_partition_manipulation>true</enforce_index_structure_match_on_partition_manipulation>
</merge_tree>
</clickhouse>

View File

@ -0,0 +1,5 @@
<clickhouse>
<merge_tree>
<enforce_index_structure_match_on_partition_manipulation>false</enforce_index_structure_match_on_partition_manipulation>
</merge_tree>
</clickhouse>

View File

@ -0,0 +1,473 @@
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1", main_configs=["configs/config_with_check_table_structure_completely.xml"]
)
# node1 = cluster.add_instance("node1")
node2 = cluster.add_instance(
"node2",
main_configs=["configs/config_without_check_table_structure_completely.xml"],
)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
# def test_setting_check_table_structure_completely(start_cluster):
# assert node1.query("""select value from system.merge_tree_settings where name='enforce_index_structure_match_on_partition_manipulation';""") == "0\n"
def test_check_completely_attach_with_different_indices(start_cluster):
    """Strict mode (node1): ATTACH PARTITION ... FROM must fail when the
    secondary indices of source and destination differ, both for a different
    index type and for a different number of indices.
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run: every test in this
    # module reuses the same table names.
    for table in tables:
        node1.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node1.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node1.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
        )
        node1.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE bloom_filter GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Same index name but different index type -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
        node1.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
                INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Destination has an extra index: rejected in strict mode -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
    finally:
        # Clean up even when an assertion above fails, so that subsequent
        # tests can recreate tables under the same names.
        for table in tables:
            node1.query(f"DROP TABLE IF EXISTS {table}")
def test_check_attach_with_different_indices(start_cluster):
    """Relaxed mode (node2): a destination with a *different* index type is
    still rejected, but a destination whose indices are a superset of the
    source's is accepted and remains queryable afterwards.
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run.
    for table in tables:
        node2.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node2.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node2.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
        )
        node2.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE bloom_filter GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Incompatible index type is rejected even in relaxed mode -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node2.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
        node2.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                `c` String,
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
                INDEX cf c TYPE bloom_filter GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Superset of the source's indices: the attach succeeds and the data
        # is readable through both indexed columns.
        node2.query(
            "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
        assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"
        assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `b` = '1'") == "1\n"
        assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `c` = '1'") == "1\n"
    finally:
        # Clean up even when an assertion above fails.
        for table in tables:
            node2.query(f"DROP TABLE IF EXISTS {table}")
def test_check_completely_attach_with_different_projections(start_cluster):
    """Strict mode (node1): ATTACH PARTITION ... FROM must fail when the
    projections of source and destination differ (different projection name
    or a different number of projections).
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run.
    for table in tables:
        node1.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node1.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node1.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number) FROM numbers(10);"
        )
        node1.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                PROJECTION differently_named_proj (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a;
            """
        )
        # Same projection body but a different name -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different projections" in str(exc.value)
        node1.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                PROJECTION proj2 (
                    SELECT
                        b,
                        avg(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Destination has an extra projection: rejected in strict mode -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different projections" in str(exc.value)
    finally:
        # Clean up even when an assertion above fails.
        for table in tables:
            node1.query(f"DROP TABLE IF EXISTS {table}")
def test_check_attach_with_different_projections(start_cluster):
    """Relaxed mode (node2): a differently named projection is still rejected,
    but a destination whose projections are a superset of the source's is
    accepted.
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run.
    for table in tables:
        node2.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node2.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node2.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number) FROM numbers(10);"
        )
        node2.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                PROJECTION differently_named_proj (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a;
            """
        )
        # Same projection body but a different name -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node2.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different projections" in str(exc.value)
        node2.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                PROJECTION proj2 (
                    SELECT
                        b,
                        avg(a)
                    GROUP BY b
                )
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Superset of the source's projections: the attach succeeds.
        node2.query(
            "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
        assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"
    finally:
        # Clean up even when an assertion above fails.
        for table in tables:
            node2.query(f"DROP TABLE IF EXISTS {table}")
def test_check_completely_attach_with_different_indices_and_projections(start_cluster):
    """Strict mode (node1): when both indices and projections differ between
    source and destination, ATTACH PARTITION ... FROM is rejected (the error
    observed first is the secondary-index mismatch).
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run.
    for table in tables:
        node1.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node1.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node1.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
        )
        node1.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE bloom_filter GRANULARITY 1,
                INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Different index type (and projection name) -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
        node1.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                PROJECTION proj2 (
                    SELECT
                        b,
                        avg(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
                INDEX cf c TYPE bloom_filter GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Extra index and extra projection: rejected in strict mode -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node1.query(
                "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
    finally:
        # Clean up even when an assertion above fails.
        for table in tables:
            node1.query(f"DROP TABLE IF EXISTS {table}")
def test_check_attach_with_different_indices_and_projections(start_cluster):
    """Relaxed mode (node2): an incompatible index type is still rejected,
    but a destination whose indices *and* projections are a superset of the
    source's is accepted and remains queryable afterwards.
    """
    tables = ("attach_partition_t1", "attach_partition_t2", "attach_partition_t3")
    # Guard against leftovers of a previously failed run.
    for table in tables:
        node2.query(f"DROP TABLE IF EXISTS {table}")
    try:
        node2.query(
            """
            CREATE TABLE attach_partition_t1
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        node2.query(
            "INSERT INTO attach_partition_t1 SELECT number, toString(number), toString(number) FROM numbers(10);"
        )
        node2.query(
            """
            CREATE TABLE attach_partition_t2
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE bloom_filter GRANULARITY 1,
                INDEX cf c TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Incompatible index type is rejected even in relaxed mode -> serverError 36.
        with pytest.raises(QueryRuntimeException) as exc:
            node2.query(
                "ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1;"
            )
        assert "Tables have different secondary indices" in str(exc.value)
        node2.query(
            """
            CREATE TABLE attach_partition_t3
            (
                `a` UInt32,
                `b` String,
                `c` String,
                PROJECTION proj1 (
                    SELECT
                        b,
                        sum(a)
                    GROUP BY b
                ),
                PROJECTION proj2 (
                    SELECT
                        b,
                        avg(a)
                    GROUP BY b
                ),
                INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1,
                INDEX cf c TYPE bloom_filter GRANULARITY 1
            )
            ENGINE = MergeTree
            ORDER BY a
            """
        )
        # Superset of both indices and projections: the attach succeeds and
        # the data is readable through both indexed columns.
        node2.query(
            "ALTER TABLE attach_partition_t3 ATTACH PARTITION tuple() FROM attach_partition_t1;"
        )
        assert node2.query("SELECT COUNT() FROM attach_partition_t3") == "10\n"
        assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `b` = '1'") == "1\n"
        assert node2.query("SELECT `a` FROM attach_partition_t3 WHERE `c` = '1'") == "1\n"
    finally:
        # Clean up even when an assertion above fails.
        for table in tables:
            node2.query(f"DROP TABLE IF EXISTS {table}")

View File

@ -13,3 +13,18 @@
Prewhere info
Prewhere filter
Prewhere filter column: less(multiply(2, b), 100)
Filter column: and(indexHint(greater(plus(i, 40), 0)), equals(a, 0)) (removed)
Prewhere info
Prewhere filter
Prewhere filter column: equals(a, 0)
Prewhere info
Prewhere filter
Prewhere filter column: less(a, 0) (removed)
Filter column: and(indexHint(greater(plus(i, 40), 0)), greaterOrEquals(a, 0)) (removed)
Prewhere info
Prewhere filter
Prewhere filter column: greaterOrEquals(a, 0)
Filter column: and(less(multiply(2, b), 100), indexHint(less(i, 100))) (removed)
Prewhere info
Prewhere filter
Prewhere filter column: less(multiply(2, b), 100)

View File

@ -17,10 +17,15 @@ SET optimize_move_to_prewhere = 1;
SET optimize_substitute_columns = 1;
SET optimize_append_index = 1;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%';
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=0;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=0;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=0;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=0;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=1;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=1;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=1;
SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS allow_experimental_analyzer=1;
DROP TABLE index_append_test_test;
DROP DATABASE db_memory_01625;

View File

@ -32,7 +32,7 @@ QUERY id: 0
FUNCTION id: 5, function_name: and, function_type: ordinary, result_type: Bool
ARGUMENTS
LIST id: 6, nodes: 2
CONSTANT id: 7, constant_value: Bool_1, constant_value_type: Bool
CONSTANT id: 7, constant_value: UInt64_1, constant_value_type: Bool
FUNCTION id: 8, function_name: notIn, function_type: ordinary, result_type: UInt8
ARGUMENTS
LIST id: 9, nodes: 2

View File

@ -0,0 +1,12 @@
1 2
2 2
3 1
4 7
5 10
6 12
1 2
2 2
3 1
4 7
5 10
6 12

View File

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Tags: no-fasttest, no-shared-merge-tree, no-parallel
# Tag no-fasttest: requires S3
# Tag no-shared-merge-tree: does not support replication
# Tag no-parallel: uses failpoints

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

# Always disable the failpoints on exit (even on failure/interruption) so
# they do not leak into other tests running against the same server.
on_exit() {
    ${CLICKHOUSE_CLIENT} -m --query "
    SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;
    SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
    "
}

trap on_exit EXIT

${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_s3_mt_fault"

${CLICKHOUSE_CLIENT} --query "
CREATE TABLE test_s3_mt_fault (a Int32, b Int64) engine = MergeTree() ORDER BY tuple(a, b)
SETTINGS disk = disk(
    name = 03008_s3_plain_rewritable_fault,
    type = s3_plain_rewritable,
    endpoint = 'http://localhost:11111/test/03008_test_s3_mt_fault/',
    access_key_id = clickhouse,
    secret_access_key = clickhouse);
"

${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) VALUES (1, 2), (2, 2), (3, 1), (4, 7), (5, 10), (6, 12);
OPTIMIZE TABLE test_s3_mt_fault FINAL;
"

${CLICKHOUSE_CLIENT} --query "
SYSTEM ENABLE FAILPOINT plain_object_storage_write_fail_on_directory_create
"

# The insert must fail with the injected error and be fully rolled back.
${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) select number, number from numbers_mt(100)" 2>&1 | grep -Fq "FAULT_INJECTED"

# ORDER BY makes the output deterministic (the reference file expects the
# rows sorted); a bare SELECT * may return rows in parallel-read order.
${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_s3_mt_fault ORDER BY a, b;"

${CLICKHOUSE_CLIENT} --query "
SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_create;
SYSTEM ENABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
"

# Same check for the directory-move fault: insert fails, table is unchanged.
${CLICKHOUSE_CLIENT} --query "
INSERT INTO test_s3_mt_fault (*) select number, number from numbers_mt(100);
" 2>&1 | grep -Fq "FAULT_INJECTED"

${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_s3_mt_fault ORDER BY a, b;"

${CLICKHOUSE_CLIENT} --query "
SYSTEM DISABLE FAILPOINT plain_object_storage_write_fail_on_directory_move;
"

# Filter out 'Removing temporary directory' because the fault injection prevents directory rename.
${CLICKHOUSE_CLIENT} --query "DROP TABLE test_s3_mt_fault SYNC" 2>&1 | grep -v 'Removing temporary directory' ||:

View File

@ -1,4 +1,4 @@
-- Tags: no-parallel
-- Tags: no-fasttest, no-parallel
-- Tag no-parallel -- due to failpoints
create table data_r1 (key Int, value String) engine=ReplicatedMergeTree('/tables/{database}/data', '{table}') order by tuple();

View File

@ -0,0 +1,7 @@
-- Self-join with the parallel_hash algorithm on a key of type
-- Tuple(Int, Dynamic) must execute without errors.
SET join_algorithm = 'parallel_hash';
SET allow_experimental_dynamic_type = 1;

DROP TABLE IF EXISTS t0;
CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory();
-- The table is empty, so the query must simply return no rows.
SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0;
DROP TABLE t0;

View File

@ -0,0 +1,15 @@
DROP TABLE IF EXISTS t0;
DROP TABLE IF EXISTS t1;

-- t0 holds keys 0..19, t1 holds keys 5..24; they overlap on 5..19.
CREATE TABLE t0 (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO t0 SELECT number from numbers(20);
CREATE TABLE t1 (x UInt64) ENGINE = MergeTree ORDER BY x;
INSERT INTO t1 SELECT number from numbers(5, 20);

-- Force the smallest possible joined blocks and multiple grace-hash buckets
-- to stress the bucket-spilling path of the grace_hash join.
SET max_joined_block_size_rows = 1;
SET grace_hash_join_initial_buckets = 2;
SET join_algorithm = 'grace_hash';

-- Matching keys are 5..19: count = 15, sum = 180.
SELECT sum(x), count() FROM t0 JOIN t1 USING x;

View File

@ -0,0 +1,36 @@
#!/usr/bin/expect -f

set basedir [file dirname $argv0]
set basename [file tail $argv0]
if {[info exists env(CLICKHOUSE_TMP)]} {
    set CLICKHOUSE_TMP $env(CLICKHOUSE_TMP)
} else {
    set CLICKHOUSE_TMP "."
}
exp_internal -f $CLICKHOUSE_TMP/$basename.debuglog 0
set history_file $CLICKHOUSE_TMP/$basename.history

log_user 0
set timeout 60
match_max 100000
expect_after {
    # Do not ignore eof from expect
    -i $any_spawn_id eof { exp_continue }
    # A default timeout action is to do nothing, change it to fail
    -i $any_spawn_id timeout { exit 1 }
}

# useful debugging configuration
# exp_internal 1

# Spawn an interactive client with highlighting of server output disabled
# (--highlight 0) and the progress table toggle off.
spawn bash -c "source $basedir/../shell_config.sh ; \$CLICKHOUSE_CLIENT_BINARY \$CLICKHOUSE_CLIENT_OPT --disable_suggestion --enable-progress-table-toggle=0 --highlight 0 --history_file=$history_file"
expect ":) "

# Make a query
send -- "SELECT 1 as Hello\r"

# The output must still carry ANSI escape sequences: ESC[1m before the
# column name "Hello" and ESC[90m before the value "1." (terminal colors
# applied to the result table).
expect -re "\\\[1mHello.*\\\[90m1\\\."
expect ":) "

send -- "exit\r"
expect eof

View File

@ -0,0 +1,7 @@
SELECT 1
FROM
(
SELECT 1 AS c0
) AS v0
ALL INNER JOIN v0 AS vx ON c0 = vx.c0
1

View File

@ -0,0 +1,21 @@
#!/usr/bin/env bash

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

# SQL user-defined functions are global, so their names are prefixed with the
# per-test database name (${CLICKHOUSE_DATABASE}) to keep runs isolated.
# The first EXPLAIN must fail (a UDF that ignores one argument is not a valid
# JOIN ON expression); the equality-like UDF must be accepted.
$CLICKHOUSE_CLIENT -q "
CREATE VIEW v0 AS SELECT 1 AS c0;
CREATE FUNCTION ${CLICKHOUSE_DATABASE}_second AS (x, y) -> y;
CREATE FUNCTION ${CLICKHOUSE_DATABASE}_equals AS (x, y) -> x = y;
SET optimize_rewrite_array_exists_to_has = 1;
EXPLAIN SYNTAX SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_second(v0.c0, vx.c0); -- { serverError INVALID_JOIN_ON_EXPRESSION }
EXPLAIN SYNTAX SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_equals(v0.c0, vx.c0);
SELECT 1 FROM v0 JOIN v0 vx ON ${CLICKHOUSE_DATABASE}_equals(v0.c0, vx.c0);
DROP view v0;
DROP FUNCTION ${CLICKHOUSE_DATABASE}_second;
DROP FUNCTION ${CLICKHOUSE_DATABASE}_equals;
"

View File

@ -1,4 +1,4 @@
personal_ws-1.1 en 2984
personal_ws-1.1 en 2985
AArch
ACLs
ALTERs
@ -2825,6 +2825,7 @@ summapwithoverflow
summingmergetree
sumwithoverflow
superaggregates
superset
supertype
supremum
symlink