From becbef9e489b477b2a3fdd0de6ab754941d14351 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 1 Apr 2024 02:08:35 +0000 Subject: [PATCH 001/171] sketch of read-in-order optimization --- .../IMergingAlgorithmWithDelayedChunk.cpp | 2 +- .../IMergingAlgorithmWithSharedChunks.cpp | 2 +- .../Algorithms/MergeTreePartLevelInfo.h | 25 ------------- .../Merges/Algorithms/MergeTreeReadInfo.h | 35 +++++++++++++++++++ .../Algorithms/MergingSortedAlgorithm.cpp | 10 ++++++ .../QueryPlan/ReadFromMergeTree.cpp | 12 +++++-- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- .../MergeTree/MergeTreeRangeReader.cpp | 17 +++++++-- src/Storages/MergeTree/MergeTreeRangeReader.h | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 8 ++++- src/Storages/MergeTree/MergeTreeReadTask.h | 6 ++++ .../MergeTree/MergeTreeSelectProcessor.cpp | 13 +++++-- .../MergeTree/MergeTreeSelectProcessor.h | 6 ++++ .../MergeTree/MergeTreeSequentialSource.cpp | 5 +-- 14 files changed, 106 insertions(+), 39 deletions(-) delete mode 100644 src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h create mode 100644 src/Processors/Merges/Algorithms/MergeTreeReadInfo.h diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp index cbad6813fbc..13b245717b3 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp @@ -1,5 +1,5 @@ #include -#include +#include namespace DB diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp index c8b69382e89..4fe50feaede 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp @@ -1,5 +1,5 @@ #include -#include +#include namespace DB { diff --git a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h b/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h deleted file mode 100644 index bcf4e759024..00000000000 --- a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include - -namespace DB -{ - -/// To carry part level if chunk is produced by a merge tree source -class MergeTreePartLevelInfo : public ChunkInfo -{ -public: - MergeTreePartLevelInfo() = delete; - explicit MergeTreePartLevelInfo(ssize_t part_level) : origin_merge_tree_part_level(part_level) { } - size_t origin_merge_tree_part_level = 0; -}; - -inline size_t getPartLevelFromChunk(const Chunk & chunk) -{ - const auto & info = chunk.getChunkInfo(); - if (const auto * part_level_info = typeid_cast(info.get())) - return part_level_info->origin_merge_tree_part_level; - return 0; -} - -} diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h new file mode 100644 index 00000000000..e79df0fb8c8 --- /dev/null +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -0,0 +1,35 @@ +#pragma once + +#include + +namespace DB +{ + +/// To carry part level and virtual row if chunk is produced by a merge tree source +class MergeTreeReadInfo : public ChunkInfo +{ +public: + MergeTreeReadInfo() = delete; + explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : + origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } + size_t origin_merge_tree_part_level = 0; + bool virtual_row = false; +}; + 
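+/// Illustrative usage sketch (assumptions, not part of this patch): a merge tree
+/// source is expected to attach this info when it emits a chunk, via the same
+/// three-argument Chunk constructor used later in this series, e.g.
+///
+///     Chunk chunk(std::move(columns), num_rows,
+///                 std::make_shared<MergeTreeReadInfo>(part_level, /*virtual_row_=*/false));
+///
+/// (columns, num_rows and part_level are placeholders.) Downstream merging code
+/// then reads the info back through the helpers below.
+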
+inline size_t getPartLevelFromChunk(const Chunk & chunk) +{ + const auto & info = chunk.getChunkInfo(); + if (const auto * read_info = typeid_cast(info.get())) + return read_info->origin_merge_tree_part_level; + return 0; +} + +inline bool getVirtualRowFromChunk(const Chunk & chunk) +{ + const auto & info = chunk.getChunkInfo(); + if (const auto * read_info = typeid_cast(info.get())) + return read_info->virtual_row; + return 0; +} + +} diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 1debfcec8e0..89f0193b05b 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -239,6 +240,15 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; + if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + { + /// If virtual row is detected, there should be only one row as a single chunk, + /// and always skip this chunk to pull the next one. + assert(initial_batch_size == 1); + queue.removeTop(); + return Status(current.impl->order); + } + bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index f4607cad040..91cd362f1d9 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -501,7 +501,8 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 limit) + UInt64 limit, + bool need_virtual_row) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. @@ -596,6 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); + processor->addVirtualRowToChunk(need_virtual_row); + auto source = std::make_shared(std::move(processor)); if (set_rows_approx) source->addTotalRowsApprox(total_rows); @@ -1028,7 +1031,12 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( } for (auto && item : splitted_parts_and_ranges) - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); + { + /// need_virtual_row = true means a MergingSortedTransform should occur. + /// If so, adding a virtual row might speedup in the case of multiple parts. 
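+        /// (Mechanism sketch: each part then emits one single-row "virtual" chunk built
+        /// from its primary-key index, so the merge can choose the right input before
+        /// decompressing whole granules from the other parts.)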
+ bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); + } } Block pipe_header; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 5ed742a9bfd..6a08622af11 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -251,7 +251,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool need_virtual_row = false); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, ActionsDAGPtr & result_projection); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 6932762f58b..0456a8e2787 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -946,7 +946,7 @@ String addDummyColumnWithRowCount(Block & block, size_t num_rows) } -MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges) +MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row) { if (max_rows == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected at least 1 row to read, got 0."); @@ -961,7 +961,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar if (prev_reader) { - read_result = prev_reader->read(max_rows, ranges); + read_result = prev_reader->read(max_rows, ranges, add_virtual_row); size_t num_read_rows; Columns columns = continueReadingChain(read_result, num_read_rows); @@ -1026,8 +1026,15 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar } else { + // if (add_virtual_row) + // { + // generate the virtual row + // } + // else + // { read_result = startReadingChain(max_rows, ranges); read_result.num_rows = read_result.numReadRows(); + // } LOG_TEST(log, "First reader returned: {}, requested columns: {}", read_result.dumpInfo(), dumpNames(merge_tree_reader->getColumns())); @@ -1062,7 +1069,11 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar read_result.addNumBytesRead(total_bytes); } - executePrewhereActionsAndFilterColumns(read_result); + /// If add_virtual_row is enabled, don't turn on prewhere so that virtual row can always pass through. 
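+    /// (Rationale, as sketched here: the virtual row only primes the merging heap; if
+    /// PREWHERE could filter it out, the merge would wait on a probe row that never arrives.)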
+ // if (!add_virtual_row) + // { + executePrewhereActionsAndFilterColumns(read_result); + // } read_result.checkInternalConsistency(); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index 688a6b0922b..d8cf33b0340 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -300,7 +300,7 @@ public: LoggerPtr log; }; - ReadResult read(size_t max_rows, MarkRanges & ranges); + ReadResult read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row); const Block & getSampleBlock() const { return result_sample_block; } diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 08b30e445e2..498c62080a9 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -158,7 +158,13 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam UInt64 recommended_rows = estimateNumRows(params); UInt64 rows_to_read = std::max(static_cast(1), std::min(params.max_block_size_rows, recommended_rows)); - auto read_result = range_readers.main.read(rows_to_read, mark_ranges); + auto read_result = range_readers.main.read(rows_to_read, mark_ranges, add_virtual_row); + + if (add_virtual_row) + { + /// Now we have the virtual row, which is at most once for each part. + add_virtual_row = false; + } /// All rows were filtered. Repeat. if (read_result.num_rows == 0) diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index c8bb501c0e8..73927d62959 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -117,6 +117,7 @@ public: size_t row_count = 0; size_t num_read_rows = 0; size_t num_read_bytes = 0; + bool is_virtual_row = false; }; MergeTreeReadTask( @@ -140,6 +141,8 @@ public: static Readers createReaders(const MergeTreeReadTaskInfoPtr & read_info, const Extras & extras, const MarkRanges & ranges); static RangeReaders createRangeReaders(const Readers & readers, const PrewhereExprInfo & prewhere_actions); + void addVirtualRow() { add_virtual_row = true; } + private: UInt64 estimateNumRows(const BlockSizeParams & params) const; @@ -158,6 +161,9 @@ private: /// Used to satistfy preferred_block_size_bytes limitation MergeTreeBlockSizePredictorPtr size_predictor; + + /// If true, add once, and then set false. + bool add_virtual_row = false; }; using MergeTreeReadTaskPtr = std::unique_ptr; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index fce733d47b7..f61365b0916 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include #include #include @@ -134,6 +134,13 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); + if (add_virtual_row) + { + /// Turn on virtual row just once. + task->addVirtualRow(); + add_virtual_row = false; + } + auto res = algorithm->readFromTask(*task, block_size_params); if (res.row_count) @@ -148,7 +155,9 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } return ChunkAndProgress{ - .chunk = Chunk(ordered_columns, res.row_count, add_part_level ? 
std::make_shared(task->getInfo().data_part->info.level) : nullptr), + .chunk = Chunk(ordered_columns, res.row_count, + add_part_level || res.is_virtual_row ? std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), res.is_virtual_row) : nullptr), .num_read_rows = res.num_read_rows, .num_read_bytes = res.num_read_bytes, .is_finished = false}; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 01bb3851e04..106190f15c3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,6 +65,8 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } + void addVirtualRowToChunk(bool add_virtual_row_) { add_virtual_row = add_virtual_row_; } + private: /// This struct allow to return block with no columns but with non-zero number of rows similar to Chunk struct BlockAndProgress @@ -99,6 +101,10 @@ private: /// Should we add part level to produced chunk. Part level is useful for next steps if query has FINAL bool add_part_level = false; + /// Should we add a virtual row as the single first chunk. + /// Virtual row is useful for read-in-order optimization when multiple parts exist. + bool add_virtual_row = false; + LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index 81eb166b300..bffea59d5d6 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -13,7 +13,7 @@ #include #include #include -#include +#include namespace DB { @@ -262,7 +262,8 @@ try ++it; } - return Chunk(std::move(res_columns), rows_read, add_part_level ? std::make_shared(data_part->info.level) : nullptr); + return Chunk(std::move(res_columns), rows_read, + add_part_level ? 
std::make_shared(data_part->info.level, false) : nullptr); } } else From 72ebd3957251bc0ca9355f80d034b3f7d3083a3e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 8 Apr 2024 02:27:54 +0000 Subject: [PATCH 002/171] add simple virtual row --- .../QueryPlan/ReadFromMergeTree.cpp | 2 +- .../MergeTree/MergeTreeRangeReader.cpp | 17 ++----- src/Storages/MergeTree/MergeTreeRangeReader.h | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadTask.h | 3 -- .../MergeTree/MergeTreeSelectProcessor.cpp | 50 ++++++++++++++----- .../MergeTree/MergeTreeSelectProcessor.h | 8 ++- 7 files changed, 51 insertions(+), 33 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 91cd362f1d9..7f7f2673aee 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,7 +597,7 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex()); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.cpp b/src/Storages/MergeTree/MergeTreeRangeReader.cpp index 0456a8e2787..6932762f58b 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.cpp +++ b/src/Storages/MergeTree/MergeTreeRangeReader.cpp @@ -946,7 +946,7 @@ String addDummyColumnWithRowCount(Block & block, size_t num_rows) } -MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row) +MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges) { if (max_rows == 0) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected at least 1 row to read, got 0."); @@ -961,7 +961,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar if (prev_reader) { - read_result = prev_reader->read(max_rows, ranges, add_virtual_row); + read_result = prev_reader->read(max_rows, ranges); size_t num_read_rows; Columns columns = continueReadingChain(read_result, num_read_rows); @@ -1026,15 +1026,8 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar } else { - // if (add_virtual_row) - // { - // generate the virtual row - // } - // else - // { read_result = startReadingChain(max_rows, ranges); read_result.num_rows = read_result.numReadRows(); - // } LOG_TEST(log, "First reader returned: {}, requested columns: {}", read_result.dumpInfo(), dumpNames(merge_tree_reader->getColumns())); @@ -1069,11 +1062,7 @@ MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, Mar read_result.addNumBytesRead(total_bytes); } - /// If add_virtual_row is enabled, don't turn on prewhere so that virtual row can always pass through. 
- // if (!add_virtual_row) - // { - executePrewhereActionsAndFilterColumns(read_result); - // } + executePrewhereActionsAndFilterColumns(read_result); read_result.checkInternalConsistency(); diff --git a/src/Storages/MergeTree/MergeTreeRangeReader.h b/src/Storages/MergeTree/MergeTreeRangeReader.h index d8cf33b0340..688a6b0922b 100644 --- a/src/Storages/MergeTree/MergeTreeRangeReader.h +++ b/src/Storages/MergeTree/MergeTreeRangeReader.h @@ -300,7 +300,7 @@ public: LoggerPtr log; }; - ReadResult read(size_t max_rows, MarkRanges & ranges, bool add_virtual_row); + ReadResult read(size_t max_rows, MarkRanges & ranges); const Block & getSampleBlock() const { return result_sample_block; } diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 498c62080a9..3c4d121195f 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -158,7 +158,7 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam UInt64 recommended_rows = estimateNumRows(params); UInt64 rows_to_read = std::max(static_cast(1), std::min(params.max_block_size_rows, recommended_rows)); - auto read_result = range_readers.main.read(rows_to_read, mark_ranges, add_virtual_row); + auto read_result = range_readers.main.read(rows_to_read, mark_ranges); if (add_virtual_row) { diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index 73927d62959..709fc73f16e 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -117,7 +117,6 @@ public: size_t row_count = 0; size_t num_read_rows = 0; size_t num_read_bytes = 0; - bool is_virtual_row = false; }; MergeTreeReadTask( @@ -141,8 +140,6 @@ public: static Readers createReaders(const MergeTreeReadTaskInfoPtr & read_info, const Extras & extras, const MarkRanges & ranges); static RangeReaders createRangeReaders(const Readers & readers, const PrewhereExprInfo & prewhere_actions); - void addVirtualRow() { add_virtual_row = true; } - private: UInt64 estimateNumRows(const BlockSizeParams & params) const; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index f61365b0916..d75802c68f3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -133,38 +133,64 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - + add_virtual_row = false; if (add_virtual_row) { /// Turn on virtual row just once. - task->addVirtualRow(); add_virtual_row = false; - } - auto res = algorithm->readFromTask(*task, block_size_params); + const auto & primary_key = storage_snapshot->metadata->primary_key; + + MergeTreeReadTask::BlockAndProgress res; + res.row_count = 1; - if (res.row_count) - { /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); for (size_t i = 0; i < result_header.columns(); ++i) { - auto name = result_header.getByPosition(i).name; - ordered_columns.push_back(res.block.getByName(name).column); + // TODO: composite pk??? 
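+            // (Note: this sketch fills only the first PK column from the index;
+            // composite primary keys are handled in a later commit of this series.)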
+ const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); + if (type_and_name.name == primary_key.column_names[0] && type_and_name.type == primary_key.data_types[0]) + ordered_columns.push_back(index[0]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + else + ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); } return ChunkAndProgress{ - .chunk = Chunk(ordered_columns, res.row_count, - add_part_level || res.is_virtual_row ? std::make_shared( - (add_part_level ? task->getInfo().data_part->info.level : 0), res.is_virtual_row) : nullptr), + .chunk = Chunk(ordered_columns, res.row_count, std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), true)), .num_read_rows = res.num_read_rows, .num_read_bytes = res.num_read_bytes, .is_finished = false}; } else { - return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; + auto res = algorithm->readFromTask(*task, block_size_params); + + if (res.row_count) + { + /// Reorder the columns according to result_header + Columns ordered_columns; + ordered_columns.reserve(result_header.columns()); + for (size_t i = 0; i < result_header.columns(); ++i) + { + auto name = result_header.getByPosition(i).name; + ordered_columns.push_back(res.block.getByName(name).column); + } + + return ChunkAndProgress{ + .chunk = Chunk(ordered_columns, res.row_count, + add_part_level ? std::make_shared( + (add_part_level ? task->getInfo().data_part->info.level : 0), false) : nullptr), + .num_read_rows = res.num_read_rows, + .num_read_bytes = res.num_read_bytes, + .is_finished = false}; + } + else + { + return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; + } } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 106190f15c3..67a03ca2533 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,11 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_) { add_virtual_row = add_virtual_row_; } + void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_) + { + add_virtual_row = add_virtual_row_; + index = index_; + } private: /// This struct allow to return block with no columns but with non-zero number of rows similar to Chunk @@ -105,6 +109,8 @@ private: /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; + Columns index; + LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; From 57a2a20900176da28b73f027fc298f7cb7f91781 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 10 Apr 2024 04:02:15 +0000 Subject: [PATCH 003/171] support composite pk --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index d75802c68f3..868e757e135 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -133,7 +133,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - add_virtual_row = false; + if (add_virtual_row) { /// Turn on virtual row just once. 
@@ -147,12 +147,14 @@ ChunkAndProgress MergeTreeSelectProcessor::read() /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); - for (size_t i = 0; i < result_header.columns(); ++i) + for (size_t i = 0, j = 0; i < result_header.columns(); ++i) { - // TODO: composite pk??? const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); - if (type_and_name.name == primary_key.column_names[0] && type_and_name.type == primary_key.data_types[0]) - ordered_columns.push_back(index[0]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) + { + ordered_columns.push_back(index[j]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + ++j; + } else ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); } From bd4385f969c5139870dcfc0ce9d72e6029ab9a59 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 15 Apr 2024 23:28:16 +0000 Subject: [PATCH 004/171] add test --- .../Merges/Algorithms/MergeTreeReadInfo.h | 2 +- ...03031_read_in_order_optimization.reference | 5 ++ .../03031_read_in_order_optimization.sql | 48 +++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization.sql diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index e79df0fb8c8..ca4bccb235f 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -29,7 +29,7 @@ inline bool getVirtualRowFromChunk(const Chunk & chunk) const auto & info = chunk.getChunkInfo(); if (const auto * read_info = typeid_cast(info.get())) return read_info->virtual_row; - return 0; + return false; } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference new file mode 100644 index 00000000000..304f7f7a049 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -0,0 +1,5 @@ +0 +1 +2 +3 +24578 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql new file mode 100644 index 00000000000..eecbfe64f6d --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -0,0 +1,48 @@ + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `x` UInt64, + `y` UInt64, + `z` UInt64, + `k` UInt64 +) +ENGINE = MergeTree +ORDER BY (x, y, z) +SETTINGS index_granularity = 8192; + +INSERT INTO t SELECT + number, + number, + number, + number +FROM numbers(8192 * 3); + +INSERT INTO t SELECT + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3) +FROM numbers(8192 * 3); + +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, +max_threads = 1, +optimize_read_in_order = 1; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND query like '%SELECT x%' +AND query not like '%system.query_log%' +ORDER BY query_start_time DESC, read_rows DESC +LIMIT 1; + +DROP TABLE t; \ No newline at end of file From 
cc3fd0e73693e877967b1f5572d8d6779088fa06 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 23 Apr 2024 02:43:49 +0000 Subject: [PATCH 005/171] minor change --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 868e757e135..1f97fec2013 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -150,13 +150,17 @@ ChunkAndProgress MergeTreeSelectProcessor::read() for (size_t i = 0, j = 0; i < result_header.columns(); ++i) { const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { - ordered_columns.push_back(index[j]->cloneResized(1)); // TODO: use the first range pk whose range might contain results + auto column = current_column->cloneEmpty(); + column->insert((*index[j])[0]); // TODO: use the first range pk whose range might contain results + ordered_columns.push_back(std::move(column)); ++j; } else - ordered_columns.push_back(type_and_name.type->createColumn()->cloneResized(1)); + ordered_columns.push_back(current_column->cloneResized(1)); } return ChunkAndProgress{ From 7f6d6400230eb90e27a99226c89ac0c5acb0d709 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 24 Apr 2024 01:14:04 +0000 Subject: [PATCH 006/171] use a better range begin in virtual row --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 2 +- src/Storages/MergeTree/MergeTreeSelectProcessor.h | 7 +++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 7f7f2673aee..f873bcb6104 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,7 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex()); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 1f97fec2013..a3fcfad3bb5 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -155,7 +155,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); - column->insert((*index[j])[0]); // TODO: use the first range pk whose range might contain results + column->insert((*index[j])[mark_range_begin]); ordered_columns.push_back(std::move(column)); ++j; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 67a03ca2533..352f771f9ce 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,10 +65,11 @@ public: void 
addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_) + void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; + mark_range_begin = mark_range_begin_; } private: @@ -108,8 +109,10 @@ private: /// Should we add a virtual row as the single first chunk. /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; - + /// PK index used in virtual row. Columns index; + /// The first range that might contain the candidate, used in virtual row. + size_t mark_range_begin; LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; From ba049d85b3126b766575f958b61fc2f84bb3a11b Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 26 Apr 2024 02:45:50 +0000 Subject: [PATCH 007/171] fix test --- tests/queries/0_stateless/03031_read_in_order_optimization.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index eecbfe64f6d..f114a838ff3 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -10,7 +10,8 @@ CREATE TABLE t ) ENGINE = MergeTree ORDER BY (x, y, z) -SETTINGS index_granularity = 8192; +SETTINGS index_granularity = 8192, +index_granularity_bytes = 10485760; INSERT INTO t SELECT number, From 86c7488647750f65d7a75dd4774f84fcf44f763b Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 4 May 2024 02:09:17 +0000 Subject: [PATCH 008/171] only read one chunk in mergetramsform when meet virtual row --- src/Processors/Merges/IMergingTransform.cpp | 7 +++++-- .../MergeTree/MergeTreeSelectProcessor.h | 2 +- ...03031_read_in_order_optimization.reference | 2 +- .../03031_read_in_order_optimization.sql | 20 +++++++++++++++++++ 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index fbb47969b2f..50b3e2ca634 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -1,3 +1,4 @@ +#include #include namespace DB @@ -101,8 +102,10 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// setNotNeeded after reading first chunk, because in optimismtic case /// (e.g. with optimized 'ORDER BY primary_key LIMIT n' and small 'n') /// we won't have to read any chunks anymore; - auto chunk = input.pull(limit_hint != 0); - if ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) + /// If virtual row exists, test it first, so don't read more chunks. 
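+        /// (pull(true) avoids requesting more data right away; the checks below call
+        /// setNeeded() only when another chunk is genuinely required, and never for a
+        /// lone virtual row.)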
+ auto chunk = input.pull(true); + if ((limit_hint == 0 && !getVirtualRowFromChunk(chunk)) + || (limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) input.setNeeded(); if (!chunk.hasRows()) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 352f771f9ce..255b4c65ff9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,7 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns& index_, size_t mark_range_begin_) + void addVirtualRowToChunk(bool add_virtual_row_, const Columns & index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 304f7f7a049..70d79aecf43 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -2,4 +2,4 @@ 1 2 3 -24578 +16386 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index f114a838ff3..999d2e265d0 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -46,4 +46,24 @@ AND query not like '%system.query_log%' ORDER BY query_start_time DESC, read_rows DESC LIMIT 1; +-- SELECT x +-- FROM t +-- ORDER BY x ASC +-- LIMIT 4 +-- SETTINGS max_block_size = 8192, +-- read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +-- max_threads = 1, +-- optimize_read_in_order = 1; + +-- SYSTEM FLUSH LOGS; + +-- -- without virtual row 16.38k, but with virtual row 24.58k, becasue read again (why?) in the non-target part after reading its virtual row and before sending the virtual row to the priority queue +-- SELECT read_rows +-- FROM system.query_log +-- WHERE current_database = currentDatabase() +-- AND query like '%SELECT x%' +-- AND query not like '%system.query_log%' +-- ORDER BY query_start_time DESC, read_rows DESC +-- LIMIT 1; + DROP TABLE t; \ No newline at end of file From 04a757eb71a0cdff42f804043025dd6e900e9283 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 4 May 2024 17:37:56 +0000 Subject: [PATCH 009/171] fix --- src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 7592f37ba22..7da73349c4a 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -234,7 +234,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & { /// If virtual row is detected, there should be only one row as a single chunk, /// and always skip this chunk to pull the next one. 
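        /// (chassert below keeps the single-row invariant checked in debug and sanitizer
        /// builds while compiling to a no-op in release builds.)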
- assert(initial_batch_size == 1); + chassert(initial_batch_size == 1); queue.removeTop(); return Status(current.impl->order); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index cdf301f8044..d7e7f9ae758 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -598,8 +598,9 @@ Pipe ReadFromMergeTree::readInOrder( processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + auto primary_key_index = part_with_ranges.data_part->getIndex(); + chassert(primary_key_index); + processor->addVirtualRowToChunk(need_virtual_row, *primary_key_index, part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) From 1c2c3aed249ea77f0d667e1a9173ca39a5a88858 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 6 May 2024 13:25:19 +0000 Subject: [PATCH 010/171] support non-preliminary merge case --- src/Processors/Merges/IMergingTransform.cpp | 9 +++-- .../QueryPlan/ReadFromMergeTree.cpp | 8 ++--- .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +-- .../MergeTree/MergeTreeSelectProcessor.h | 4 +-- ...03031_read_in_order_optimization.reference | 5 +++ .../03031_read_in_order_optimization.sql | 33 +++++++++---------- 6 files changed, 34 insertions(+), 29 deletions(-) diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index 50b3e2ca634..3daeca254ed 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -102,10 +102,13 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// setNotNeeded after reading first chunk, because in optimismtic case /// (e.g. with optimized 'ORDER BY primary_key LIMIT n' and small 'n') /// we won't have to read any chunks anymore; - /// If virtual row exists, test it first, so don't read more chunks. + /// If virtual row exists, let it pass through, so don't read more chunks. auto chunk = input.pull(true); - if ((limit_hint == 0 && !getVirtualRowFromChunk(chunk)) - || (limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end) + bool virtual_row = getVirtualRowFromChunk(chunk); + if (limit_hint == 0 && !virtual_row) + input.setNeeded(); + + if (!virtual_row && ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end)) input.setNeeded(); if (!chunk.hasRows()) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index d7e7f9ae758..4386d435732 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,10 +597,8 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - - auto primary_key_index = part_with_ranges.data_part->getIndex(); - chassert(primary_key_index); - processor->addVirtualRowToChunk(need_virtual_row, *primary_key_index, part_with_ranges.ranges.front().begin); + processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) @@ -1037,7 +1035,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( { /// need_virtual_row = true means a MergingSortedTransform should occur. 
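        /// (item.size() > 1 means several parts feed one sorted stream, so a merge step,
        /// and hence a consumer for the virtual row, is guaranteed.)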
/// If so, adding a virtual row might speedup in the case of multiple parts. - bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + bool need_virtual_row = item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index a3fcfad3bb5..4feef5115bf 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -152,10 +152,10 @@ ChunkAndProgress MergeTreeSelectProcessor::read() const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); ColumnPtr current_column = type_and_name.type->createColumn(); - if (j < index.size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) + if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); - column->insert((*index[j])[mark_range_begin]); + column->insert((*(*index)[j])[mark_range_begin]); ordered_columns.push_back(std::move(column)); ++j; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 3dab11b556c..7a562c1a115 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,7 +65,7 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const Columns & index_, size_t mark_range_begin_) + void addVirtualRowToChunk(bool add_virtual_row_, const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) { add_virtual_row = add_virtual_row_; index = index_; @@ -101,7 +101,7 @@ private: /// Virtual row is useful for read-in-order optimization when multiple parts exist. bool add_virtual_row = false; /// PK index used in virtual row. - Columns index; + IMergeTreeDataPart::Index index; /// The first range that might contain the candidate, used in virtual row. 
size_t mark_range_begin; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 70d79aecf43..62e8669fbe0 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -3,3 +3,8 @@ 2 3 16386 +0 +1 +2 +3 +16386 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 999d2e265d0..57f9838392f 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -46,24 +46,23 @@ AND query not like '%system.query_log%' ORDER BY query_start_time DESC, read_rows DESC LIMIT 1; --- SELECT x --- FROM t --- ORDER BY x ASC --- LIMIT 4 --- SETTINGS max_block_size = 8192, --- read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge --- max_threads = 1, --- optimize_read_in_order = 1; +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1; --- SYSTEM FLUSH LOGS; +SYSTEM FLUSH LOGS; --- -- without virtual row 16.38k, but with virtual row 24.58k, becasue read again (why?) in the non-target part after reading its virtual row and before sending the virtual row to the priority queue --- SELECT read_rows --- FROM system.query_log --- WHERE current_database = currentDatabase() --- AND query like '%SELECT x%' --- AND query not like '%system.query_log%' --- ORDER BY query_start_time DESC, read_rows DESC --- LIMIT 1; +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND query like '%SELECT x%' +AND query not like '%system.query_log%' +ORDER BY query_start_time DESC, read_rows DESC +LIMIT 1; DROP TABLE t; \ No newline at end of file From 0537b8c833b69638c3868497f935f9bb7cf46e0a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 8 May 2024 00:17:37 +0000 Subject: [PATCH 011/171] restrict to preliminary merge and add more tests --- .../QueryPlan/ReadFromMergeTree.cpp | 2 +- ...03031_read_in_order_optimization.reference | 7 ++- .../03031_read_in_order_optimization.sql | 48 +++++++++++++++---- 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 4386d435732..9a0469f49a8 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1035,7 +1035,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( { /// need_virtual_row = true means a MergingSortedTransform should occur. /// If so, adding a virtual row might speedup in the case of multiple parts. 
- bool need_virtual_row = item.size() > 1; + bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index 62e8669fbe0..c73f79d8dce 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -3,8 +3,13 @@ 2 3 16386 +16385 +16386 +16387 +16388 +24578 0 1 2 3 -16386 +16384 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 57f9838392f..597845564e4 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -24,28 +24,55 @@ INSERT INTO t SELECT number + (8192 * 3), number + (8192 * 3), number + (8192 * 3), - number + (8192 * 3) + number FROM numbers(8192 * 3); +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. SELECT x FROM t ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 0, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, -optimize_read_in_order = 1; +optimize_read_in_order = 1, +log_comment = 'no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND query like '%SELECT x%' -AND query not like '%system.query_log%' -ORDER BY query_start_time DESC, read_rows DESC +AND log_comment = 'no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +limit 1; + +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
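+-- (Expected read_rows: 24578 = 2 virtual rows + 3 x 8192 real rows; the filter on k is
+-- applied only after the granules are read, so the filtered chunks still count.)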
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'with filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC LIMIT 1; +-- Should not impact cases without preliminary merge (might read again when chunk row is less than limit) SELECT x FROM t ORDER BY x ASC @@ -53,16 +80,17 @@ LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge max_threads = 1, -optimize_read_in_order = 1; +optimize_read_in_order = 1, +log_comment = 'no impact'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND query like '%SELECT x%' -AND query not like '%system.query_log%' -ORDER BY query_start_time DESC, read_rows DESC +AND log_comment = 'no impact' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC LIMIT 1; DROP TABLE t; \ No newline at end of file From 8f8ba55ac3cc254d4a890ed6f45cb5a4ef411143 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 14 May 2024 19:43:47 +0000 Subject: [PATCH 012/171] add check flag --- .../Merges/Algorithms/MergeTreeReadInfo.h | 1 + .../QueryPlan/ReadFromMergeTree.cpp | 5 ++- src/Processors/QueryPlan/SortingStep.cpp | 31 ++++++++++++++ src/QueryPipeline/QueryPipelineBuilder.h | 2 + .../MergeTree/MergeTreeSelectProcessor.cpp | 4 +- .../MergeTree/MergeTreeSelectProcessor.h | 7 ++-- src/Storages/MergeTree/MergeTreeSource.h | 2 + ...03031_read_in_order_optimization.reference | 7 +++- .../03031_read_in_order_optimization.sql | 40 +++++++++++++++---- 9 files changed, 84 insertions(+), 15 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index ca4bccb235f..52ca92b471a 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -13,6 +13,7 @@ public: explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } size_t origin_merge_tree_part_level = 0; + /// If virtual_row is true, the chunk must contain the virtual row only. 
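+    /// (A virtual row is a one-row chunk carrying primary-key values only; consumers
+    /// such as MergingSortedAlgorithm use it to pick the next input, then discard it.)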
bool virtual_row = false; }; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 9a0469f49a8..2f1db9539a6 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -597,8 +597,9 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(need_virtual_row, part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + processor->addVirtualRowToChunk(part_with_ranges.data_part->getIndex(), part_with_ranges.ranges.front().begin); + if (need_virtual_row) + processor->enableVirtualRow(); auto source = std::make_shared(std::move(processor)); if (set_rows_approx) diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index d0491cb4b82..d728e8fb154 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -13,6 +13,9 @@ #include #include +#include +#include +#include namespace CurrentMetrics { @@ -243,6 +246,34 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { + /// We check every step of this pipeline, to make sure virtual row can work correctly. + /// Currently ExpressionTransform is supported, should add other processors if possible. + const auto& pipe = pipeline.getPipe(); + bool enable_virtual_row = true; + std::vector> merge_tree_sources; + for (const auto & processor : pipe.getProcessors()) + { + if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) + { + merge_tree_sources.push_back(merge_tree_source); + } + else if (!std::dynamic_pointer_cast(processor)) + { + enable_virtual_row = false; + break; + } + } + + /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor + if (enable_virtual_row && merge_tree_sources.size() >= 2) + { + for (const auto & merge_tree_source : merge_tree_sources) + { + const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); + merge_tree_select_processor->enableVirtualRow(); + } + } + auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index f0b2ead687e..50a77360d46 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,6 +197,8 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } + const Pipe& getPipe() const { return pipe; } + /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 4feef5115bf..0f4b68ddde9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -134,10 +134,10 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - if (add_virtual_row) + if (enable_virtual_row) { /// Turn on virtual row just once. - add_virtual_row = false; + enable_virtual_row = false; const auto & primary_key = storage_snapshot->metadata->primary_key; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 7a562c1a115..57da1039ba9 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -65,13 +65,14 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(bool add_virtual_row_, const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) + void addVirtualRowToChunk(const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) { - add_virtual_row = add_virtual_row_; index = index_; mark_range_begin = mark_range_begin_; } + void enableVirtualRow() { enable_virtual_row = true; } + private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); @@ -99,7 +100,7 @@ private: /// Should we add a virtual row as the single first chunk. /// Virtual row is useful for read-in-order optimization when multiple parts exist. - bool add_virtual_row = false; + bool enable_virtual_row = false; /// PK index used in virtual row. IMergeTreeDataPart::Index index; /// The first range that might contain the candidate, used in virtual row. diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index 655f0ee6ebe..486b8be2fef 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -19,6 +19,8 @@ public: Status prepare() override; + const MergeTreeSelectProcessorPtr& getProcessor() const { return processor; } + #if defined(OS_LINUX) int schedule() override; #endif diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization.reference index c73f79d8dce..c7cce7e60e9 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.reference @@ -12,4 +12,9 @@ 1 2 3 -16384 +16386 +16385 +16386 +16387 +16388 +24578 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization.sql index 597845564e4..332ee7f58dc 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization.sql @@ -27,6 +27,8 @@ INSERT INTO t SELECT number FROM numbers(8192 * 3); +SYSTEM STOP MERGES t; + -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), -- both chunks come from the same part. 
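-- (Expected read_rows: 16386 = 2 virtual rows + 2 x 8192; the merge transform pre-fetches
-- one extra chunk before the LIMIT stops it.)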
SELECT x @@ -37,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no filter'; +log_comment = 'preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no filter' +AND log_comment = 'preliminary merge, no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -60,19 +62,20 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'with filter'; +log_comment = 'preliminary merge with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'with filter' +AND log_comment = 'preliminary merge with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; --- Should not impact cases without preliminary merge (might read again when chunk row is less than limit) +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. SELECT x FROM t ORDER BY x ASC @@ -81,14 +84,37 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no impact'; +log_comment = 'no preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no impact' +AND log_comment = 'no preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
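+-- Note: each virtual row is counted in read_rows, which is why the expected
+-- numbers in the reference file are 2 higher than the real rows read.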
+SELECT k
+FROM t
+WHERE k > 8192 * 2
+ORDER BY x ASC
+LIMIT 4
+SETTINGS max_block_size = 8192,
+read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge
+max_threads = 1,
+optimize_read_in_order = 1,
+log_comment = 'no preliminary merge, with filter';
+
+SYSTEM FLUSH LOGS;
+
+SELECT read_rows
+FROM system.query_log
+WHERE current_database = currentDatabase()
+AND log_comment = 'no preliminary merge, with filter'
+AND type = 'QueryFinish'
+ORDER BY query_start_time DESC
+LIMIT 1;

From 3f6cdeb8802c04f39d01e4b048fe0381ff200242 Mon Sep 17 00:00:00 2001
From: jsc0218
Date: Wed, 15 May 2024 18:26:28 +0000
Subject: [PATCH 013/171] add more checks

---
 .../Algorithms/MergingSortedAlgorithm.cpp     |  8 ++
 src/Processors/QueryPlan/SortingStep.cpp      | 73 ++++++++++++-------
 src/Processors/QueryPlan/SortingStep.h        |  2 +
 .../MergeTree/MergeTreeSelectProcessor.cpp    |  2 +-
 .../MergeTree/MergeTreeSelectProcessor.h      |  2 +
 .../02346_fulltext_index_search.sql           |  8 +-
 ...r_optimization_with_virtual_row.reference} | 10 +++
 ...n_order_optimization_with_virtual_row.sql} | 20 ++++-
 8 files changed, 92 insertions(+), 33 deletions(-)
 rename tests/queries/0_stateless/{03031_read_in_order_optimization.reference => 03031_read_in_order_optimization_with_virtual_row.reference} (59%)
 rename tests/queries/0_stateless/{03031_read_in_order_optimization.sql => 03031_read_in_order_optimization_with_virtual_row.sql} (83%)

diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp
index 7da73349c4a..eb5805087c4 100644
--- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp
+++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp
@@ -8,6 +8,11 @@
 namespace DB
 {
 
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+}
+
 MergingSortedAlgorithm::MergingSortedAlgorithm(
     Block header_,
     size_t num_inputs,
@@ -134,6 +139,9 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue
 
     auto current = queue.current();
 
+    if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk))
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for non-batch mode.");
+
     if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row)
     {
         /// Get the next block from the corresponding source, if there is one.
diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp
index d728e8fb154..97157b06f19 100644
--- a/src/Processors/QueryPlan/SortingStep.cpp
+++ b/src/Processors/QueryPlan/SortingStep.cpp
@@ -241,38 +241,57 @@ void SortingStep::finishSorting(
         });
 }
 
+void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const
+{
+    /// We check every step of this pipeline to make sure the virtual row can work correctly.
+    /// Currently only ExpressionTransform is supported; other processors should be added if possible.
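+    /// A virtual row carries only primary key values (taken from the index at the
+    /// beginning of a part's first mark range), so any processor between the source
+    /// and the merge that could filter or reorder rows would make it meaningless;
+    /// row-preserving transforms such as ExpressionTransform are fine.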
+ const auto& pipe = pipeline.getPipe(); + bool enable_virtual_row = true; + std::vector> merge_tree_sources; + for (const auto & processor : pipe.getProcessors()) + { + if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) + { + merge_tree_sources.push_back(merge_tree_source); + } + else if (!std::dynamic_pointer_cast(processor)) + { + enable_virtual_row = false; + break; + } + } + + /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor + if (enable_virtual_row && merge_tree_sources.size() >= 2) + { + /// We have to check further in the case of fixed prefix, for example, + /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, + /// merge sort would sort based on b, leading to wrong result in comparison. + auto extractNameAfterDot = [](const String & name) + { + size_t pos = name.find_last_of('.'); + return (pos != String::npos) ? name.substr(pos + 1) : name; + }; + + const ColumnWithTypeAndName & type_and_name = pipeline.getHeader().getByPosition(0); + String column_name = extractNameAfterDot(type_and_name.name); + for (const auto & merge_tree_source : merge_tree_sources) + { + const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); + + const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); + if (primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) + merge_tree_select_processor->enableVirtualRow(); + } + } +} + void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescription & result_sort_desc, const UInt64 limit_) { /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - /// We check every step of this pipeline, to make sure virtual row can work correctly. - /// Currently ExpressionTransform is supported, should add other processors if possible. - const auto& pipe = pipeline.getPipe(); - bool enable_virtual_row = true; - std::vector> merge_tree_sources; - for (const auto & processor : pipe.getProcessors()) - { - if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) - { - merge_tree_sources.push_back(merge_tree_source); - } - else if (!std::dynamic_pointer_cast(processor)) - { - enable_virtual_row = false; - break; - } - } - - /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor - if (enable_virtual_row && merge_tree_sources.size() >= 2) - { - for (const auto & merge_tree_source : merge_tree_sources) - { - const auto& merge_tree_select_processor = merge_tree_source->getProcessor(); - merge_tree_select_processor->enableVirtualRow(); - } - } + enableVirtualRow(pipeline); auto transform = std::make_shared( pipeline.getHeader(), diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 52f48f66a32..5f3820c346b 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -116,6 +116,8 @@ private: UInt64 limit_, bool skip_partial_sort = false); + void enableVirtualRow(const QueryPipelineBuilder & pipeline) const; + Type type; SortDescription prefix_description; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 0f4b68ddde9..67b58b53a0d 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -139,7 +139,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() /// Turn on virtual row just once. 
enable_virtual_row = false; - const auto & primary_key = storage_snapshot->metadata->primary_key; + const auto & primary_key = getPrimaryKey(); MergeTreeReadTask::BlockAndProgress res; res.row_count = 1; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 57da1039ba9..14481be24d3 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -73,6 +73,8 @@ public: void enableVirtualRow() { enable_virtual_row = true; } + const KeyDescription & getPrimaryKey() const { return storage_snapshot->metadata->primary_key; } + private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index 3c172bfdaf7..fb6da10a115 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 1; --- check the query only read 3 granules (6 rows total; each granule has 2 rows) +-- check the query only read 3 granules (6 rows total; each granule has 2 rows; there are 2 extra virtual rows) SYSTEM FLUSH LOGS; -SELECT read_rows==6 from system.query_log +SELECT read_rows==8 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 1;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference similarity index 59% rename from tests/queries/0_stateless/03031_read_in_order_optimization.reference rename to tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index c7cce7e60e9..12c4056ac27 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -3,18 +3,28 @@ 2 3 16386 +======== 16385 16386 16387 16388 24578 +======== 0 1 2 3 16386 +======== 16385 16386 16387 16388 24578 +======== +1 2 +1 2 +1 3 +1 3 +1 4 +1 4 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql similarity index 83% rename from tests/queries/0_stateless/03031_read_in_order_optimization.sql rename to tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 332ee7f58dc..ddcc1498af9 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -51,6 +51,7 @@ AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; +SELECT '========'; -- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), -- all chunks 
come from the same part.
 SELECT k
@@ -74,6 +75,7 @@ AND type = 'QueryFinish'
 ORDER BY query_start_time DESC
 LIMIT 1;
 
+SELECT '========';
 -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192),
 -- both chunks come from the same part.
 SELECT x
@@ -96,6 +98,7 @@ AND type = 'QueryFinish'
 ORDER BY query_start_time DESC
 LIMIT 1;
 
+SELECT '========';
 -- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192),
 -- all chunks come from the same part.
 SELECT k
@@ -119,4 +122,19 @@ AND type = 'QueryFinish'
 ORDER BY query_start_time DESC
 LIMIT 1;
 
-DROP TABLE t;
\ No newline at end of file
+DROP TABLE t;
+
+SELECT '========';
+-- from 02149_read_in_order_fixed_prefix
+DROP TABLE IF EXISTS t_read_in_order;
+
+CREATE TABLE t_read_in_order(a UInt32, b UInt32)
+ENGINE = MergeTree ORDER BY (a, b)
+SETTINGS index_granularity = 3;
+
+SYSTEM STOP MERGES t_read_in_order;
+
+INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5);
+INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5);
+
+SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b SETTINGS max_threads = 1;

From 4a0a4c68b2e66c0d4abfef417376d3225965b459 Mon Sep 17 00:00:00 2001
From: jsc0218
Date: Sat, 18 May 2024 03:33:42 +0000
Subject: [PATCH 014/171] restrict the case of func pk

---
 src/Processors/QueryPlan/SortingStep.cpp      | 24 +++++--
 ...er_optimization_with_virtual_row.reference |  4 ++
 ...in_order_optimization_with_virtual_row.sql | 64 +++++++++++++------
 3 files changed, 66 insertions(+), 26 deletions(-)

diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp
index 84f90fa782f..addbdd020bb 100644
--- a/src/Processors/QueryPlan/SortingStep.cpp
+++ b/src/Processors/QueryPlan/SortingStep.cpp
@@ -262,12 +262,9 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const
         }
     }
 
-    /// If everything is okay, we enable virtual row in MergeTreeSelectProcessor
+    /// If everything is okay, enable virtual row in MergeTreeSelectProcessor.
     if (enable_virtual_row && merge_tree_sources.size() >= 2)
     {
-        /// We have to check further in the case of fixed prefix, for example,
-        /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b,
-        /// merge sort would sort based on b, leading to wrong result in comparison.
         auto extractNameAfterDot = [](const String & name)
         {
             size_t pos = name.find_last_of('.');
             return (pos != String::npos) ? name.substr(pos + 1) : name;
         };
@@ -278,10 +275,25 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const
         String column_name = extractNameAfterDot(type_and_name.name);
         for (const auto & merge_tree_source : merge_tree_sources)
         {
-            const auto& merge_tree_select_processor = merge_tree_source->getProcessor();
+            const auto & merge_tree_select_processor = merge_tree_source->getProcessor();
+
+            /// Check that the primary key is not function-based, as we only compare type and name when filling in the primary key of the virtual row.
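+            /// For example, ORDER BY (A, -B) makes the second key column a FUNCTION
+            /// node in the ActionsDAG rather than a plain INPUT, so index values
+            /// cannot simply be copied into the virtual row.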
const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); - if (primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) + const auto & actions = primary_key.expression->getActions(); + bool is_okay = true; + for (const auto & action : actions) + { + if (action.node->type != ActionsDAG::ActionType::INPUT) + { + is_okay = false; + break; + } + } + + /// We have to check further in the case of fixed prefix, for example, + /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, + /// merge sort would sort based on b, leading to wrong result in comparison. + if (is_okay && primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) merge_tree_select_processor->enableVirtualRow(); } } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 12c4056ac27..b4b1554a7d4 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -28,3 +28,7 @@ 1 3 1 4 1 4 +======== +1 3 +1 2 +1 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index ddcc1498af9..198bf1eb307 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -13,22 +13,22 @@ ORDER BY (x, y, z) SETTINGS index_granularity = 8192, index_granularity_bytes = 10485760; -INSERT INTO t SELECT - number, - number, - number, - number -FROM numbers(8192 * 3); - -INSERT INTO t SELECT - number + (8192 * 3), - number + (8192 * 3), - number + (8192 * 3), - number -FROM numbers(8192 * 3); - SYSTEM STOP MERGES t; +INSERT INTO t SELECT + number, + number, + number, + number +FROM numbers(8192 * 3); + +INSERT INTO t SELECT + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3), + number +FROM numbers(8192 * 3); + -- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), -- both chunks come from the same part. 
SELECT x @@ -126,15 +126,39 @@ DROP TABLE t; SELECT '========'; -- from 02149_read_in_order_fixed_prefix -DROP TABLE IF EXISTS t_read_in_order; +DROP TABLE IF EXISTS fixed_prefix; -CREATE TABLE t_read_in_order(a UInt32, b UInt32) +CREATE TABLE fixed_prefix(a UInt32, b UInt32) ENGINE = MergeTree ORDER BY (a, b) SETTINGS index_granularity = 3; -SYSTEM STOP MERGES t_read_in_order; +SYSTEM STOP MERGES fixed_prefix; -INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; +SELECT a, b FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; + +DROP TABLE fixed_prefix; + +SELECT '========'; +-- currently don't support virtual row in this case +DROP TABLE IF EXISTS function_pk; + +CREATE TABLE function_pk +( + `A` Int64, + `B` Int64 +) +ENGINE = MergeTree ORDER BY (A, -B) +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES function_pk; + +INSERT INTO function_pk values(1,1); +INSERT INTO function_pk values(1,3); +INSERT INTO function_pk values(1,2); + +SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; + +DROP TABLE function_pk; From bd05771faac142853dde6b1461d34e6d3d47e89e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 21 May 2024 04:43:26 +0000 Subject: [PATCH 015/171] temporarily disable a test --- .../03031_read_in_order_optimization_with_virtual_row.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 198bf1eb307..aff9faf3968 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -159,6 +159,8 @@ INSERT INTO function_pk values(1,1); INSERT INTO function_pk values(1,3); INSERT INTO function_pk values(1,2); +-- TODO: handle preliminary merge for this case, temporarily disable it +SET optimize_read_in_order = 0; SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; DROP TABLE function_pk; From f8b3987d5292ed1e2acfc7cab2b7bfcd80f1aee1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Tue, 25 Jun 2024 03:26:17 +0300 Subject: [PATCH 016/171] Delete attaching prefix for deduplicated parts --- .../MergeTree/ReplicatedMergeTreeSink.cpp | 9 ++- .../__init__.py | 0 .../test.py | 61 +++++++++++++++++++ 3 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 tests/integration/test_deduplicated_attached_part_rename/__init__.py create mode 100644 tests/integration/test_deduplicated_attached_part_rename/test.py diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 4b4f4c33e7d..4190e3cce5e 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -561,8 +561,15 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl String block_id = deduplicate ? 
fmt::format("{}_{}", part->info.partition_id, part->checksums.getTotalChecksumHex()) : ""; bool deduplicated = commitPart(zookeeper, part, block_id, replicas_num).second; + int error = 0; /// Set a special error code if the block is duplicate - int error = (deduplicate && deduplicated) ? ErrorCodes::INSERT_WAS_DEDUPLICATED : 0; + /// And remove attaching_ prefix + if (deduplicate && deduplicated) + { + error = ErrorCodes::INSERT_WAS_DEDUPLICATED; + fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); + part->renameTo(new_relative_path, false); + } PartLog::addNewPart(storage.getContext(), PartLog::PartLogEntry(part, watch.elapsed(), profile_events_scope.getSnapshot()), ExecutionStatus(error)); return deduplicated; } diff --git a/tests/integration/test_deduplicated_attached_part_rename/__init__.py b/tests/integration/test_deduplicated_attached_part_rename/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py new file mode 100644 index 00000000000..362b2bad37a --- /dev/null +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -0,0 +1,61 @@ +import pytest +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +ch1 = cluster.add_instance( + "ch1", + with_zookeeper=True, + macros={"replica": "node1"}, + stay_alive=True, +) + +database_name = "dedup_attach" + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + yield cluster + + finally: + cluster.shutdown() + + +def q(query): + return ch1.query(database=database_name, sql=query) + + +def test_deduplicated_attached_part_renamed_after_attach(started_cluster): + ch1.query(f"CREATE DATABASE {database_name}") + + q("CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;") + q("INSERT INTO dedup VALUES (1),(2),(3);") + + table_data_path = q("SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'").strip("'[]\n") + + ch1.exec_in_container( + [ + "bash", + "-c", + f"cp -r {table_data_path}/all_0_0_0 {table_data_path}/detached/all_0_0_0", + ] + ) + # Part is attached as all_1_1_0 + q("ALTER TABLE dedup ATTACH PART 'all_0_0_0'") + + assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + + ch1.exec_in_container( + [ + "bash", + "-c", + f"cp -r {table_data_path}/all_1_1_0 {table_data_path}/detached/all_1_1_0", + ] + ) + # Part is deduplicated and not attached + q("ALTER TABLE dedup ATTACH PART 'all_1_1_0'") + + assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 1 == int(q(f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + # Check that it is not 'attaching_all_1_1_0' + assert "all_1_1_0" == q(f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip() From 6601ded4a1332548ae4cfe35c7ba8f276214d153 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Wed, 10 Jul 2024 23:02:11 +0300 Subject: [PATCH 017/171] Fix black --- .../test.py | 34 +++++++++++++++---- 1 file changed, 28 insertions(+), 6 deletions(-) diff --git 
a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 362b2bad37a..2b7ab0934d1 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -11,6 +11,7 @@ ch1 = cluster.add_instance( database_name = "dedup_attach" + @pytest.fixture(scope="module") def started_cluster(): try: @@ -28,10 +29,14 @@ def q(query): def test_deduplicated_attached_part_renamed_after_attach(started_cluster): ch1.query(f"CREATE DATABASE {database_name}") - q("CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;") + q( + "CREATE TABLE dedup (id UInt32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/dedup_attach/dedup/s1', 'r1') ORDER BY id;" + ) q("INSERT INTO dedup VALUES (1),(2),(3);") - table_data_path = q("SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'").strip("'[]\n") + table_data_path = q( + "SELECT data_paths FROM system.tables WHERE database=currentDatabase() AND table='dedup'" + ).strip("'[]\n") ch1.exec_in_container( [ @@ -43,7 +48,11 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): # Part is attached as all_1_1_0 q("ALTER TABLE dedup ATTACH PART 'all_0_0_0'") - assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 2 == int( + q( + f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) ch1.exec_in_container( [ @@ -55,7 +64,20 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): # Part is deduplicated and not attached q("ALTER TABLE dedup ATTACH PART 'all_1_1_0'") - assert 2 == int(q(f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'").strip()) - assert 1 == int(q(f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip()) + assert 2 == int( + q( + f"SELECT count() FROM system.parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) + assert 1 == int( + q( + f"SELECT count() FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) # Check that it is not 'attaching_all_1_1_0' - assert "all_1_1_0" == q(f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'").strip() + assert ( + "all_1_1_0" + == q( + f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" + ).strip() + ) From 351ba3ef102979714d546e7575a9f9f54325498a Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 8 Aug 2024 10:07:39 +0200 Subject: [PATCH 018/171] Revert "Revert "Use `Atomic` database by default in `clickhouse-local`"" --- programs/local/LocalServer.cpp | 21 +++++---- src/Databases/DatabaseAtomic.cpp | 24 ++++++++-- src/Databases/DatabaseAtomic.h | 3 ++ src/Databases/DatabaseLazy.cpp | 3 +- src/Databases/DatabaseLazy.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 28 ++++++++--- src/Databases/DatabaseOnDisk.h | 7 ++- src/Databases/DatabaseOrdinary.cpp | 4 +- src/Databases/DatabasesOverlay.cpp | 47 +++++++++++++++++++ src/Databases/DatabasesOverlay.h | 9 ++++ src/Databases/IDatabase.h | 1 + .../MySQL/DatabaseMaterializedMySQL.cpp | 1 + src/Interpreters/StorageID.h | 1 - .../0_stateless/01191_rename_dictionary.sql | 1 + ...ickhouse_local_interactive_table.reference | 4 
+- ...2141_clickhouse_local_interactive_table.sh | 4 +- .../03199_atomic_clickhouse_local.reference | 6 +++ .../03199_atomic_clickhouse_local.sh | 24 ++++++++++ 18 files changed, 161 insertions(+), 29 deletions(-) create mode 100644 tests/queries/0_stateless/03199_atomic_clickhouse_local.reference create mode 100755 tests/queries/0_stateless/03199_atomic_clickhouse_local.sh diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 6b0b8fc5b50..0d731ed0e14 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -50,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -216,12 +216,12 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str return system_database; } -static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_) +static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context) { - auto databaseCombiner = std::make_shared(name_, context_); - databaseCombiner->registerNextDatabase(std::make_shared(name_, "", context_)); - databaseCombiner->registerNextDatabase(std::make_shared(name_, context_)); - return databaseCombiner; + auto overlay = std::make_shared(name_, context); + overlay->registerNextDatabase(std::make_shared(name_, fs::weakly_canonical(context->getPath()), UUIDHelpers::generateV4(), context)); + overlay->registerNextDatabase(std::make_shared(name_, "", context)); + return overlay; } /// If path is specified and not empty, will try to setup server environment and load existing metadata @@ -367,7 +367,7 @@ std::string LocalServer::getInitialCreateTableQuery() else table_structure = "(" + table_structure + ")"; - return fmt::format("CREATE TABLE {} {} ENGINE = File({}, {});", + return fmt::format("CREATE TEMPORARY TABLE {} {} ENGINE = File({}, {});", table_name, table_structure, data_format, table_file); } @@ -761,7 +761,12 @@ void LocalServer::processConfig() DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); std::string default_database = server_settings.default_database; - DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context)); + { + DatabasePtr database = createClickHouseLocalDatabaseOverlay(default_database, global_context); + if (UUID uuid = database->getUUID(); uuid != UUIDHelpers::Nil) + DatabaseCatalog::instance().addUUIDMapping(uuid); + DatabaseCatalog::instance().attachDatabase(default_database, database); + } global_context->setCurrentDatabase(default_database); if (getClientConfiguration().has("path")) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index d86e29ca915..83b82976e4f 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -53,9 +53,6 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, c , db_uuid(uuid) { assert(db_uuid != UUIDHelpers::Nil); - fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); - fs::create_directories(path_to_table_symlinks); - tryCreateMetadataSymlink(); } DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, ContextPtr context_) @@ -63,6 +60,16 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C { } +void DatabaseAtomic::createDirectories() +{ + if (database_atomic_directories_created.test_and_set()) + return; + 
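+    /// First let the base class create the generic data/metadata directories,
+    /// then add the Atomic-specific symlink directory and metadata symlink.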
DatabaseOnDisk::createDirectories(); + fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); + fs::create_directories(path_to_table_symlinks); + tryCreateMetadataSymlink(); +} + String DatabaseAtomic::getTableDataPath(const String & table_name) const { std::lock_guard lock(mutex); @@ -99,6 +106,7 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); + createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); not_in_use = cleanupDetachedTables(); @@ -200,11 +208,15 @@ void DatabaseAtomic::renameTable(ContextPtr local_context, const String & table_ if (exchange && !supportsAtomicRename()) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "RENAME EXCHANGE is not supported"); + createDirectories(); waitDatabaseStarted(); auto & other_db = dynamic_cast(to_database); bool inside_database = this == &other_db; + if (!inside_database) + other_db.createDirectories(); + String old_metadata_path = getObjectMetadataPath(table_name); String new_metadata_path = to_database.getObjectMetadataPath(to_table_name); @@ -325,6 +337,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context) { + createDirectories(); DetachedTables not_in_use; auto table_data_path = getTableDataPath(query); try @@ -461,6 +474,9 @@ void DatabaseAtomic::beforeLoadingMetadata(ContextMutablePtr /*context*/, Loadin if (mode < LoadingStrictnessLevel::FORCE_RESTORE) return; + if (!fs::exists(path_to_table_symlinks)) + return; + /// Recreate symlinks to table data dirs in case of force restore, because some of them may be broken for (const auto & table_path : fs::directory_iterator(path_to_table_symlinks)) { @@ -588,6 +604,7 @@ void DatabaseAtomic::renameDatabase(ContextPtr query_context, const String & new { /// CREATE, ATTACH, DROP, DETACH and RENAME DATABASE must hold DDLGuard + createDirectories(); waitDatabaseStarted(); bool check_ref_deps = query_context->getSettingsRef().check_referential_table_dependencies; @@ -679,4 +696,5 @@ void registerDatabaseAtomic(DatabaseFactory & factory) }; factory.registerDatabase("Atomic", create_fn); } + } diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 4a4ccfa2573..ca24494f600 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -76,6 +76,9 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); + std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; + void createDirectories(); + void tryCreateMetadataSymlink(); virtual bool allowMoveTableToOtherDatabaseEngine(IDatabase & /*to_database*/) const { return false; } diff --git a/src/Databases/DatabaseLazy.cpp b/src/Databases/DatabaseLazy.cpp index 3fb6d30fcb8..e43adfc5d37 100644 --- a/src/Databases/DatabaseLazy.cpp +++ b/src/Databases/DatabaseLazy.cpp @@ -47,12 +47,13 @@ DatabaseLazy::DatabaseLazy(const String & name_, const String & metadata_path_, : DatabaseOnDisk(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseLazy (" + name_ + ")", context_) , expiration_time(expiration_time_) { + createDirectories(); } void DatabaseLazy::loadStoredObjects(ContextMutablePtr local_context, 
LoadingStrictnessLevel /*mode*/) { - iterateMetadataFiles(local_context, [this, &local_context](const String & file_name) + iterateMetadataFiles([this, &local_context](const String & file_name) { const std::string table_name = unescapeForFileName(file_name.substr(0, file_name.size() - 4)); diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 41cfb751141..aeac130594f 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -12,7 +12,7 @@ class DatabaseLazyIterator; class Context; /** Lazy engine of databases. - * Works like DatabaseOrdinary, but stores in memory only the cache. + * Works like DatabaseOrdinary, but stores only recently accessed tables in memory. * Can be used only with *Log engines. */ class DatabaseLazy final : public DatabaseOnDisk diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 734f354d9a5..82a81b0b32d 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -172,7 +172,14 @@ DatabaseOnDisk::DatabaseOnDisk( , metadata_path(metadata_path_) , data_path(data_path_) { - fs::create_directories(local_context->getPath() + data_path); +} + + +void DatabaseOnDisk::createDirectories() +{ + if (directories_created.test_and_set()) + return; + fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } @@ -190,6 +197,8 @@ void DatabaseOnDisk::createTable( const StoragePtr & table, const ASTPtr & query) { + createDirectories(); + const auto & settings = local_context->getSettingsRef(); const auto & create = query->as(); assert(table_name == create.getTable()); @@ -257,7 +266,6 @@ void DatabaseOnDisk::createTable( } commitCreateTable(create, table, table_metadata_tmp_path, table_metadata_path, local_context); - removeDetachedPermanentlyFlag(local_context, table_name, table_metadata_path, false); } @@ -285,6 +293,8 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora { try { + createDirectories(); + /// Add a table to the map of known tables. 
attachTable(query_context, query.getTable(), table, getTableDataPath(query)); @@ -420,6 +430,7 @@ void DatabaseOnDisk::renameTable( throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Moving tables between databases of different engines is not supported"); } + createDirectories(); waitDatabaseStarted(); auto table_data_relative_path = getTableDataPath(table_name); @@ -568,14 +579,14 @@ void DatabaseOnDisk::drop(ContextPtr local_context) assert(TSA_SUPPRESS_WARNING_FOR_READ(tables).empty()); if (local_context->getSettingsRef().force_remove_data_recursively_on_drop) { - (void)fs::remove_all(local_context->getPath() + getDataPath()); + (void)fs::remove_all(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove_all(getMetadataPath()); } else { try { - (void)fs::remove(local_context->getPath() + getDataPath()); + (void)fs::remove(std::filesystem::path(getContext()->getPath()) / data_path); (void)fs::remove(getMetadataPath()); } catch (const fs::filesystem_error & e) @@ -613,15 +624,18 @@ time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_n } } -void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const +void DatabaseOnDisk::iterateMetadataFiles(const IteratingFunction & process_metadata_file) const { + if (!fs::exists(metadata_path)) + return; + auto process_tmp_drop_metadata_file = [&](const String & file_name) { assert(getUUID() == UUIDHelpers::Nil); static const char * tmp_drop_ext = ".sql.tmp_drop"; const std::string object_name = file_name.substr(0, file_name.size() - strlen(tmp_drop_ext)); - if (fs::exists(local_context->getPath() + getDataPath() + '/' + object_name)) + if (fs::exists(std::filesystem::path(getContext()->getPath()) / data_path / object_name)) { fs::rename(getMetadataPath() + file_name, getMetadataPath() + object_name + ".sql"); LOG_WARNING(log, "Object {} was not dropped previously and will be restored", backQuote(object_name)); @@ -638,7 +652,7 @@ void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const Iterat std::vector> metadata_files; fs::directory_iterator dir_end; - for (fs::directory_iterator dir_it(getMetadataPath()); dir_it != dir_end; ++dir_it) + for (fs::directory_iterator dir_it(metadata_path); dir_it != dir_end; ++dir_it) { String file_name = dir_it->path().filename(); /// For '.svn', '.gitignore' directory and similar. 
diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 12656068643..0c0ecf76a26 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -64,7 +64,7 @@ public: time_t getObjectMetadataModificationTime(const String & object_name) const override; String getDataPath() const override { return data_path; } - String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } + String getTableDataPath(const String & table_name) const override { return std::filesystem::path(data_path) / escapeForFileName(table_name) / ""; } String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } @@ -83,7 +83,7 @@ protected: using IteratingFunction = std::function; - void iterateMetadataFiles(ContextPtr context, const IteratingFunction & process_metadata_file) const; + void iterateMetadataFiles(const IteratingFunction & process_metadata_file) const; ASTPtr getCreateTableQueryImpl( const String & table_name, @@ -99,6 +99,9 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} + std::atomic_flag directories_created = ATOMIC_FLAG_INIT; + void createDirectories(); + const String metadata_path; const String data_path; }; diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 8808261654f..dd8a3f42ea8 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ static constexpr size_t METADATA_FILE_BUFFER_SIZE = 32768; static constexpr const char * const CONVERT_TO_REPLICATED_FLAG_NAME = "convert_to_replicated"; DatabaseOrdinary::DatabaseOrdinary(const String & name_, const String & metadata_path_, ContextPtr context_) - : DatabaseOrdinary(name_, metadata_path_, "data/" + escapeForFileName(name_) + "/", "DatabaseOrdinary (" + name_ + ")", context_) + : DatabaseOrdinary(name_, metadata_path_, std::filesystem::path("data") / escapeForFileName(name_) / "", "DatabaseOrdinary (" + name_ + ")", context_) { } @@ -265,7 +265,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } }; - iterateMetadataFiles(local_context, process_metadata); + iterateMetadataFiles(process_metadata); size_t objects_in_database = metadata.parsed_tables.size() - prev_tables_count; size_t dictionaries_in_database = metadata.total_dictionaries - prev_total_dictionaries; diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp index 801356b3dd7..495733e15fd 100644 --- a/src/Databases/DatabasesOverlay.cpp +++ b/src/Databases/DatabasesOverlay.cpp @@ -14,6 +14,8 @@ namespace ErrorCodes { extern const int LOGICAL_ERROR; extern const int CANNOT_GET_CREATE_TABLE_QUERY; + extern const int BAD_ARGUMENTS; + extern const int UNKNOWN_TABLE; } DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_) @@ -124,6 +126,39 @@ StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & tab getEngineName()); } +void DatabasesOverlay::renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) +{ + for (auto & db : databases) + { + if (db->isTableExist(name, current_context)) + { + if (DatabasesOverlay * 
to_overlay_database = typeid_cast(&to_database)) + { + /// Renaming from Overlay database inside itself or into another Overlay database. + /// Just use the first database in the overlay as a destination. + if (to_overlay_database->databases.empty()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "The destination Overlay database {} does not have any members", to_database.getDatabaseName()); + + db->renameTable(current_context, name, *to_overlay_database->databases[0], to_name, exchange, dictionary); + } + else + { + /// Renaming into a different type of database. E.g. from Overlay on top of Atomic database into just Atomic database. + db->renameTable(current_context, name, to_database, to_name, exchange, dictionary); + } + + return; + } + } + throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist", backQuote(getDatabaseName()), backQuote(name)); +} + ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const { ASTPtr result = nullptr; @@ -178,6 +213,18 @@ String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const return result; } +UUID DatabasesOverlay::getUUID() const +{ + UUID result = UUIDHelpers::Nil; + for (const auto & db : databases) + { + result = db->getUUID(); + if (result != UUIDHelpers::Nil) + break; + } + return result; +} + UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const { UUID result = UUIDHelpers::Nil; diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h index b0c7e7e4032..40c653e5cb5 100644 --- a/src/Databases/DatabasesOverlay.h +++ b/src/Databases/DatabasesOverlay.h @@ -35,12 +35,21 @@ public: StoragePtr detachTable(ContextPtr context, const String & table_name) override; + void renameTable( + ContextPtr current_context, + const String & name, + IDatabase & to_database, + const String & to_name, + bool exchange, + bool dictionary) override; + ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override; ASTPtr getCreateDatabaseQuery() const override; String getTableDataPath(const String & table_name) const override; String getTableDataPath(const ASTCreateQuery & query) const override; + UUID getUUID() const override; UUID tryGetTableUUID(const String & table_name) const override; void drop(ContextPtr context) override; diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h index f94326d220e..02418abb2b0 100644 --- a/src/Databases/IDatabase.h +++ b/src/Databases/IDatabase.h @@ -416,6 +416,7 @@ public: std::lock_guard lock{mutex}; return database_name; } + /// Get UUID of database. 
virtual UUID getUUID() const { return UUIDHelpers::Nil; } diff --git a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp index 2f5477a6b9d..8b3850c4e0c 100644 --- a/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMaterializedMySQL.cpp @@ -46,6 +46,7 @@ DatabaseMaterializedMySQL::DatabaseMaterializedMySQL( , settings(std::move(settings_)) , materialize_thread(context_, database_name_, mysql_database_name_, std::move(pool_), std::move(client_), binlog_client_, settings.get()) { + createDirectories(); } void DatabaseMaterializedMySQL::rethrowExceptionIfNeeded() const diff --git a/src/Interpreters/StorageID.h b/src/Interpreters/StorageID.h index f9afbc7b98d..ad55d16e284 100644 --- a/src/Interpreters/StorageID.h +++ b/src/Interpreters/StorageID.h @@ -27,7 +27,6 @@ class ASTQueryWithTableAndOutput; class ASTTableIdentifier; class Context; -// TODO(ilezhankin): refactor and merge |ASTTableIdentifier| struct StorageID { String database_name; diff --git a/tests/queries/0_stateless/01191_rename_dictionary.sql b/tests/queries/0_stateless/01191_rename_dictionary.sql index c5012dabc81..be95e5a7d4b 100644 --- a/tests/queries/0_stateless/01191_rename_dictionary.sql +++ b/tests/queries/0_stateless/01191_rename_dictionary.sql @@ -27,6 +27,7 @@ RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QU DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} DROP TABLE test_01191.t; +DROP DATABASE IF EXISTS dummy_db; CREATE DATABASE dummy_db ENGINE=Atomic; RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference index 0bb8966cbe4..0e74c0a083e 100644 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.reference @@ -1,2 +1,2 @@ -CREATE TABLE default.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') -CREATE TABLE foo.`table`\n(\n `key` String\n)\nENGINE = File(\'TSVWithNamesAndTypes\', \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') +CREATE TEMPORARY TABLE `table`\n(\n `key` String\n)\nENGINE = File(TSVWithNamesAndTypes, \'/dev/null\') diff --git a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh index 934d87616ac..3a95e59416a 100755 --- a/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh +++ b/tests/queries/0_stateless/02141_clickhouse_local_interactive_table.sh @@ -4,5 +4,5 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' -$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create table table' +$CLICKHOUSE_LOCAL --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' +$CLICKHOUSE_LOCAL --database foo --file /dev/null --structure "key String" --input-format TSVWithNamesAndTypes --interactive --send_logs_level=trace <<<'show create temporary table table' diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference new file mode 100644 index 00000000000..1975397394b --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.reference @@ -0,0 +1,6 @@ +123 +Hello +['Hello','world'] +Hello +Hello +['Hello','world'] diff --git a/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh new file mode 100755 index 00000000000..edaa83b8f95 --- /dev/null +++ b/tests/queries/0_stateless/03199_atomic_clickhouse_local.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_LOCAL} -n " +CREATE TABLE test (x UInt8) ORDER BY x; +INSERT INTO test VALUES (123); +SELECT * FROM test; +CREATE OR REPLACE TABLE test (s String) ORDER BY s; +INSERT INTO test VALUES ('Hello'); +SELECT * FROM test; +RENAME TABLE test TO test2; +CREATE OR REPLACE TABLE test (s Array(String)) ORDER BY s; +INSERT INTO test VALUES (['Hello', 'world']); +SELECT * FROM test; +SELECT * FROM test2; +EXCHANGE TABLES test AND test2; +SELECT * FROM test; +SELECT * FROM test2; +DROP TABLE test; +DROP TABLE test2; +" From 848285eabc5accf96084f847c86be5e583ab80a0 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 12 Aug 2024 15:40:03 +0000 Subject: [PATCH 019/171] Fix OrderByLimitByDuplicateEliminationPass with IGNORE NULLS --- src/Analyzer/FunctionNode.cpp | 7 +++ src/Analyzer/Passes/FuseFunctionsPass.cpp | 5 +- ...ore_nulls_query_tree_elimination.reference | 3 ++ ...22_ignore_nulls_query_tree_elimination.sql | 51 +++++++++++++++++++ 4 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference create mode 100644 tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql diff --git a/src/Analyzer/FunctionNode.cpp b/src/Analyzer/FunctionNode.cpp index e98b04fe9a9..f402309c7be 100644 --- a/src/Analyzer/FunctionNode.cpp +++ b/src/Analyzer/FunctionNode.cpp @@ -88,6 +88,7 @@ void FunctionNode::resolveAsFunction(FunctionBasePtr function_value) function_name = function_value->getName(); function = std::move(function_value); kind = FunctionKind::ORDINARY; + nulls_action = NullsAction::EMPTY; } void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_function_value) @@ -95,6 +96,12 @@ void FunctionNode::resolveAsAggregateFunction(AggregateFunctionPtr aggregate_fun function_name = aggregate_function_value->getName(); function = std::move(aggregate_function_value); kind = FunctionKind::AGGREGATE; + /** When the function is resolved, we do not need the nulls action anymore. 
+ * The only thing that the nulls action does is map from one function to another. + * Thus, the nulls action is encoded in the function name and does not make sense anymore. + * Keeping the nulls action may lead to incorrect comparison of functions, e.g., count() and count() IGNORE NULLS are the same function. + */ + nulls_action = NullsAction::EMPTY; } void FunctionNode::resolveAsWindowFunction(AggregateFunctionPtr window_function_value) diff --git a/src/Analyzer/Passes/FuseFunctionsPass.cpp b/src/Analyzer/Passes/FuseFunctionsPass.cpp index 0175e304a2b..1009e7981ea 100644 --- a/src/Analyzer/Passes/FuseFunctionsPass.cpp +++ b/src/Analyzer/Passes/FuseFunctionsPass.cpp @@ -81,10 +81,9 @@ QueryTreeNodePtr createResolvedFunction(const ContextPtr & context, const String } FunctionNodePtr createResolvedAggregateFunction( - const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}, NullsAction action = NullsAction::EMPTY) + const String & name, const QueryTreeNodePtr & argument, const Array & parameters = {}) { auto function_node = std::make_shared(name); - function_node->setNullsAction(action); if (!parameters.empty()) { @@ -96,7 +95,7 @@ FunctionNodePtr createResolvedAggregateFunction( function_node->getArguments().getNodes() = { argument }; AggregateFunctionProperties properties; - auto aggregate_function = AggregateFunctionFactory::instance().get(name, action, {argument->getResultType()}, parameters, properties); + auto aggregate_function = AggregateFunctionFactory::instance().get(name, NullsAction::EMPTY, {argument->getResultType()}, parameters, properties); function_node->resolveAsAggregateFunction(std::move(aggregate_function)); return function_node; diff --git a/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference new file mode 100644 index 00000000000..1f242fa6f00 --- /dev/null +++ b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.reference @@ -0,0 +1,3 @@ +3 +3 +3 diff --git a/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql new file mode 100644 index 00000000000..72f9781ed45 --- /dev/null +++ b/tests/queries/0_stateless/03222_ignore_nulls_query_tree_elimination.sql @@ -0,0 +1,51 @@ +#!/usr/bin/env -S ${HOME}/clickhouse-client --queries-file + +DROP TABLE IF EXISTS with_fill_date__fuzz_0; + +CREATE TABLE with_fill_date__fuzz_0 +( + `d` Date, + `d32` Nullable(Int32), + `d33` Int32 +) +ENGINE = Memory; + + +INSERT INTO with_fill_date__fuzz_0 VALUES (toDate('2020-03-03'), 1, 3), (toDate('2020-03-03'), NULL, 3), (toDate('2020-02-05'), 1, 1); + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + count(), + count() IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32) RESPECT NULLS, + any_respect_nulls(d32), + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32), + any(d32) IGNORE NULLS, + any(d32) RESPECT NULLS, + any_respect_nulls(d32) IGNORE NULLS, + any_respect_nulls(d32), + sum(d33), + sum(d33) IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + From 0abb330356245b27d929c750101dcfd1925cb6a4 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 13 Aug 2024 09:21:39 +0000 Subject: [PATCH 020/171] fix 03010_sum_to_to_count_if_nullable.reference --- 
.../0_stateless/03010_sum_to_to_count_if_nullable.reference | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference index 79ebc7a5c0c..db8d26ccfea 100644 --- a/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference +++ b/tests/queries/0_stateless/03010_sum_to_to_count_if_nullable.reference @@ -83,7 +83,7 @@ QUERY id: 0 FUNCTION id: 4, function_name: tuple, function_type: ordinary, result_type: Tuple(Nullable(UInt64)) ARGUMENTS LIST id: 5, nodes: 1 - FUNCTION id: 6, function_name: sum, function_type: aggregate, nulls_action : IGNORE_NULLS, result_type: Nullable(UInt64) + FUNCTION id: 6, function_name: sum, function_type: aggregate, result_type: Nullable(UInt64) ARGUMENTS LIST id: 7, nodes: 1 FUNCTION id: 8, function_name: if, function_type: ordinary, result_type: Nullable(UInt8) From 1ba1efe3a77fc5181d3c8e228c93e5f20a087c86 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 20 Aug 2024 17:01:41 +0000 Subject: [PATCH 021/171] fix --- .../Algorithms/MergeTreePartLevelInfo.h | 29 ------------------- 1 file changed, 29 deletions(-) delete mode 100644 src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h diff --git a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h b/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h deleted file mode 100644 index e4f22deec8d..00000000000 --- a/src/Processors/Merges/Algorithms/MergeTreePartLevelInfo.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#include - -namespace DB -{ - -/// To carry part level if chunk is produced by a merge tree source -class MergeTreePartLevelInfo : public ChunkInfoCloneable -{ -public: - MergeTreePartLevelInfo() = delete; - explicit MergeTreePartLevelInfo(ssize_t part_level) - : origin_merge_tree_part_level(part_level) - { } - MergeTreePartLevelInfo(const MergeTreePartLevelInfo & other) = default; - - size_t origin_merge_tree_part_level = 0; -}; - -inline size_t getPartLevelFromChunk(const Chunk & chunk) -{ - const auto part_level_info = chunk.getChunkInfos().get(); - if (part_level_info) - return part_level_info->origin_merge_tree_part_level; - return 0; -} - -} From 57996cc68463d750d31ff26071176b3f8cbfa9ae Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 3 Sep 2024 02:38:48 +0000 Subject: [PATCH 022/171] temp fix --- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 6 +++--- tests/queries/0_stateless/02346_fulltext_index_search.sql | 8 ++++---- .../03031_read_in_order_optimization_with_virtual_row.sql | 1 + 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index cc28884df24..4f1df44f68a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -164,8 +164,8 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } auto chunk = Chunk(ordered_columns, res.row_count); - if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, true)); + chunk.getChunkInfos().add(std::make_shared( + add_part_level ? 
task->getInfo().data_part->info.level : 0, true)); return ChunkAndProgress{ .chunk = std::move(chunk), @@ -190,7 +190,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() auto chunk = Chunk(ordered_columns, res.row_count); if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, true)); + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); return ChunkAndProgress{ .chunk = std::move(chunk), diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index 179d98a161b..f0505f63124 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 1; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 0; --- check the query only read 3 granules (6 rows total; each granule has 2 rows; there are 2 extra virtual rows) +-- check the query only read 3 granules (6 rows total; each granule has 2 rows) SYSTEM FLUSH LOGS; -SELECT read_rows==8 from system.query_log +SELECT read_rows==6 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 1;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 0;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index aff9faf3968..5bae739bc51 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -108,6 +108,7 @@ ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +read_in_order_use_buffering = false, --avoid buffer max_threads = 1, optimize_read_in_order = 1, log_comment = 'no preliminary merge, with filter'; From 87c7a8b4fbfbea4e9b02ae6494b5baad7dd30b42 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 4 Sep 2024 23:08:02 +0000 Subject: [PATCH 023/171] virtualrow sketch --- .../QueryPlan/ReadFromMergeTree.cpp | 3 + src/Processors/QueryPlan/SortingStep.cpp | 3 +- .../Transforms/VirtualRowTransform.cpp | 99 +++++++++++++++++++ .../Transforms/VirtualRowTransform.h | 28 ++++++ 4 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 src/Processors/Transforms/VirtualRowTransform.cpp create mode 100644 src/Processors/Transforms/VirtualRowTransform.h diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index fd1f09f1df8..90e499d02f7 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -635,6 +636,8 @@ Pipe ReadFromMergeTree::readInOrder( }); } + pipe.addSimpleTransform([](const Block & header){ 
return std::make_shared<VirtualRowTransform>(header); });
+
     return pipe;
 }
 
diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp
index f1ee68d64cf..aa909bef8a9 100644
--- a/src/Processors/QueryPlan/SortingStep.cpp
+++ b/src/Processors/QueryPlan/SortingStep.cpp
@@ -6,6 +6,7 @@
 #include 
 #include 
 #include 
+#include <Processors/Transforms/VirtualRowTransform.h>
 #include 
 #include 
 #include 
@@ -259,7 +260,7 @@ void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const
         {
             merge_tree_sources.push_back(merge_tree_source);
         }
-        else if (!std::dynamic_pointer_cast<ExpressionTransform>(processor))
+        else if (!std::dynamic_pointer_cast<ExpressionTransform>(processor) && !std::dynamic_pointer_cast<VirtualRowTransform>(processor))
         {
             enable_virtual_row = false;
             break;
diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp
new file mode 100644
index 00000000000..2e486616e8e
--- /dev/null
+++ b/src/Processors/Transforms/VirtualRowTransform.cpp
@@ -0,0 +1,99 @@
+#include <Processors/Transforms/VirtualRowTransform.h>
+#include "Processors/Chunk.h"
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+VirtualRowTransform::VirtualRowTransform(const Block & header)
+    : IInflatingTransform(header, header)
+{
+}
+
+IInflatingTransform::Status VirtualRowTransform::prepare()
+{
+    /// Check can output.
+
+    if (output.isFinished())
+    {
+        input.close();
+        return Status::Finished;
+    }
+
+    if (!output.canPush())
+    {
+        input.setNotNeeded();
+        return Status::PortFull;
+    }
+
+    /// Output if has data.
+    if (generated)
+    {
+        output.push(std::move(current_chunk));
+        generated = false;
+        return Status::PortFull;
+    }
+
+    if (can_generate)
+        return Status::Ready;
+
+    /// Check can input.
+    if (!has_input)
+    {
+        if (input.isFinished())
+        {
+            if (is_finished)
+            {
+                output.finish();
+                return Status::Finished;
+            }
+            is_finished = true;
+            return Status::Ready;
+        }
+
+        input.setNeeded();
+
+        if (!input.hasData())
+            return Status::NeedData;
+
+        /// Set input port NotNeeded after chunk was pulled.
+        current_chunk = input.pull(true);
+        has_input = true;
+    }
+
+    /// Now transform.
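+    /// A chunk is now buffered in current_chunk and has_input is set, so
+    /// returning Ready lets the base IInflatingTransform::work() consume the
+    /// buffered chunk and later generate output from it.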
+    return Status::Ready;
+}
+
+void VirtualRowTransform::consume(Chunk chunk)
+{
+    if (!is_first)
+    {
+        temp_chunk = std::move(chunk);
+        return;
+    }
+
+    is_first = false;
+    temp_chunk = std::move(chunk);
+}
+
+Chunk VirtualRowTransform::generate()
+{
+    if (temp_chunk.empty())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform");
+
+    Chunk result;
+    result.swap(temp_chunk);
+    return result;
+}
+
+bool VirtualRowTransform::canGenerate()
+{
+    return !temp_chunk.empty();
+}
+
+}
diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h
new file mode 100644
index 00000000000..d054c798345
--- /dev/null
+++ b/src/Processors/Transforms/VirtualRowTransform.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include <Processors/IInflatingTransform.h>
+#include <Processors/Chunk.h>
+
+namespace DB
+{
+
+class VirtualRowTransform : public IInflatingTransform
+{
+public:
+    explicit VirtualRowTransform(const Block & header);
+
+    String getName() const override { return "VirtualRowTransform"; }
+
+    Status prepare() override;
+
+protected:
+    void consume(Chunk chunk) override;
+    bool canGenerate() override;
+    Chunk generate() override;
+
+private:
+    bool is_first = false;
+    Chunk temp_chunk;
+};
+
+}

From 67ad7b592ce5152496bc8ddc5f3dce3cb7e9d571 Mon Sep 17 00:00:00 2001
From: jsc0218
Date: Fri, 6 Sep 2024 04:12:03 +0000
Subject: [PATCH 024/171] better

---
 .../Transforms/VirtualRowTransform.cpp        | 41 ++++++++++++-------
 .../Transforms/VirtualRowTransform.h          | 19 ++++++---
 2 files changed, 40 insertions(+), 20 deletions(-)

diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp
index 2e486616e8e..e79ede2abec 100644
--- a/src/Processors/Transforms/VirtualRowTransform.cpp
+++ b/src/Processors/Transforms/VirtualRowTransform.cpp
@@ -10,11 +10,12 @@ namespace ErrorCodes
 }
 
 VirtualRowTransform::VirtualRowTransform(const Block & header)
-    : IInflatingTransform(header, header)
+    : IProcessor({header}, {header})
+    , input(inputs.front()), output(outputs.front())
 {
 }
 
-IInflatingTransform::Status VirtualRowTransform::prepare()
+VirtualRowTransform::Status VirtualRowTransform::prepare()
 {
     /// Check can output.
@@ -46,13 +47,8 @@ IInflatingTransform::Status VirtualRowTransform::prepare() { if (input.isFinished()) { - if (is_finished) - { - output.finish(); - return Status::Finished; - } - is_finished = true; - return Status::Ready; + output.finish(); + return Status::Finished; } input.setNeeded(); @@ -69,6 +65,28 @@ IInflatingTransform::Status VirtualRowTransform::prepare() return Status::Ready; } +void VirtualRowTransform::work() +{ + if (can_generate) + { + if (generated) + throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it already was generated"); + + current_chunk = generate(); + generated = true; + can_generate = false; + } + else + { + if (!has_input) + throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it wasn't read"); + + consume(std::move(current_chunk)); + has_input = false; + can_generate = true; + } +} + void VirtualRowTransform::consume(Chunk chunk) { if (!is_first) @@ -91,9 +109,4 @@ Chunk VirtualRowTransform::generate() return result; } -bool VirtualRowTransform::canGenerate() -{ - return !temp_chunk.empty(); -} - } diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index d054c798345..7f6be5d792e 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -6,7 +6,7 @@ namespace DB { -class VirtualRowTransform : public IInflatingTransform +class VirtualRowTransform : public IProcessor { public: explicit VirtualRowTransform(const Block & header); @@ -14,13 +14,20 @@ public: String getName() const override { return "VirtualRowTransform"; } Status prepare() override; - -protected: - void consume(Chunk chunk) override; - bool canGenerate() override; - Chunk generate() override; + void work() override; private: + void consume(Chunk chunk); + Chunk generate(); + + InputPort & input; + OutputPort & output; + + Chunk current_chunk; + bool has_input = false; + bool generated = false; + bool can_generate = false; + bool is_first = false; Chunk temp_chunk; }; From 384617cfdf26539d5478120caa25f7e57a28d6b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Fri, 6 Sep 2024 18:12:16 +0300 Subject: [PATCH 025/171] Check for unexpected relative path --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index cf5537452f3..68aa370959c 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -582,6 +582,8 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl if (deduplicate && deduplicated) { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; + if (!startsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_")) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); } From 35e263a4205afa405a5f819fdf91e102ad0cd088 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Fri, 6 Sep 2024 18:12:44 +0300 Subject: [PATCH 026/171] Cleanup for flaky tests --- 
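Note below the fold: without this teardown, repeated runs against the same
ZooKeeper instance would find leftover replica metadata under
/clickhouse/tables/dedup_attach/dedup/s1 plus a stale database, which is the
flakiness this cleanup removes; SYSTEM DROP REPLICA is what clears the
ZooKeeper side after the table itself is dropped.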
.../test_deduplicated_attached_part_rename/test.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 2b7ab0934d1..7afd85c62dc 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -81,3 +81,7 @@ def test_deduplicated_attached_part_renamed_after_attach(started_cluster): f"SELECT name FROM system.detached_parts WHERE database='{database_name}' AND table = 'dedup'" ).strip() ) + + q("DROP TABLE dedup") + q("SYSTEM DROP REPLICA 'r1' FROM ZKPATH '/clickhouse/tables/dedup_attach/dedup/s1'") + ch1.query(f"DROP DATABASE {database_name}") From 36f62334c40610ad062a41ef3edbb8ecd535afff Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 8 Sep 2024 00:31:02 +0000 Subject: [PATCH 027/171] move logic to virtualrow transform --- .../QueryPlan/ReadFromMergeTree.cpp | 20 ++++-- .../Transforms/VirtualRowTransform.cpp | 68 +++++++++++-------- .../Transforms/VirtualRowTransform.h | 25 ++++--- .../MergeTree/MergeTreeSelectProcessor.cpp | 60 +++------------- .../MergeTree/MergeTreeSelectProcessor.h | 8 --- ...1_mergetree_read_in_order_spread.reference | 7 +- ...er_optimization_with_virtual_row.reference | 16 +---- ...in_order_optimization_with_virtual_row.sql | 58 ++-------------- 8 files changed, 90 insertions(+), 172 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 90e499d02f7..264d4cd095d 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -615,15 +615,25 @@ Pipe ReadFromMergeTree::readInOrder( actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); - processor->addVirtualRowToChunk(part_with_ranges.data_part->getIndex(), part_with_ranges.ranges.front().begin); - if (need_virtual_row) - processor->enableVirtualRow(); auto source = std::make_shared(std::move(processor), data.getLogName()); if (set_total_rows_approx) source->addTotalRowsApprox(total_rows); - pipes.emplace_back(std::move(source)); + Pipe pipe(source); + + if (need_virtual_row) + { + pipe.addSimpleTransform([&](const Block & header) + { + return std::make_shared(header, + storage_snapshot->metadata->primary_key, + part_with_ranges.data_part->getIndex(), + part_with_ranges.ranges.front().begin); + }); + } + + pipes.emplace_back(std::move(pipe)); } auto pipe = Pipe::unitePipes(std::move(pipes)); @@ -636,8 +646,6 @@ Pipe ReadFromMergeTree::readInOrder( }); } - pipe.addSimpleTransform([](const Block & header){ return std::make_shared(header); }); - return pipe; } diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index e79ede2abec..55b442cefb6 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -1,5 +1,5 @@ #include -#include "Processors/Chunk.h" +#include namespace DB { @@ -9,9 +9,14 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header) - : IProcessor({header}, {header}) +VirtualRowTransform::VirtualRowTransform(const Block & header_, + const KeyDescription & primary_key_, + const IMergeTreeDataPart::Index & index_, + size_t mark_range_begin_) + : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) + , 
header(header_), primary_key(primary_key_) + , index(index_), mark_range_begin(mark_range_begin_) { } @@ -72,41 +77,50 @@ void VirtualRowTransform::work() if (generated) throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it already was generated"); - current_chunk = generate(); generated = true; can_generate = false; + + if (!is_first) + { + if (current_chunk.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform"); + return; + } + + is_first = false; + + /// Reorder the columns according to result_header + Columns ordered_columns; + ordered_columns.reserve(header.columns()); + for (size_t i = 0, j = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + // ordered_columns.push_back(current_column->cloneResized(1)); + + if (j < index->size() && type_and_name.name == primary_key.column_names[j] + && type_and_name.type == primary_key.data_types[j]) + { + auto column = current_column->cloneEmpty(); + column->insert((*(*index)[j])[mark_range_begin]); + ordered_columns.push_back(std::move(column)); + ++j; + } + else + ordered_columns.push_back(current_column->cloneResized(1)); + } + + current_chunk.setColumns(ordered_columns, 1); + current_chunk.getChunkInfos().add(std::make_shared(0, true)); } else { if (!has_input) throw Exception(ErrorCodes::LOGICAL_ERROR, "VirtualRowTransform cannot consume chunk because it wasn't read"); - consume(std::move(current_chunk)); has_input = false; can_generate = true; } } -void VirtualRowTransform::consume(Chunk chunk) -{ - if (!is_first) - { - temp_chunk = std::move(chunk); - return; - } - - is_first = false; - temp_chunk = std::move(chunk); -} - -Chunk VirtualRowTransform::generate() -{ - if (temp_chunk.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't generate chunk in VirtualRowTransform"); - - Chunk result; - result.swap(temp_chunk); - return result; -} - } diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index 7f6be5d792e..b9f0cb46242 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -1,15 +1,20 @@ #pragma once -#include -#include +#include +#include +#include namespace DB { +/// Virtual row is useful for read-in-order optimization when multiple parts exist. class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header); + explicit VirtualRowTransform(const Block & header_, + const KeyDescription & primary_key_, + const IMergeTreeDataPart::Index & index_, + size_t mark_range_begin_); String getName() const override { return "VirtualRowTransform"; } @@ -17,19 +22,21 @@ public: void work() override; private: - void consume(Chunk chunk); - Chunk generate(); - InputPort & input; OutputPort & output; Chunk current_chunk; bool has_input = false; bool generated = false; - bool can_generate = false; + bool can_generate = true; + bool is_first = true; - bool is_first = false; - Chunk temp_chunk; + Block header; + KeyDescription primary_key; + /// PK index used in virtual row. + IMergeTreeDataPart::Index index; + /// The first range that might contain the candidate. 
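+    /// Also serves as the offset into the loaded primary-key index: work()
+    /// copies the key values at this mark into the single-row virtual chunk.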
+ size_t mark_range_begin; }; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 4f1df44f68a..ca368a94bd4 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -134,38 +134,22 @@ ChunkAndProgress MergeTreeSelectProcessor::read() if (!task->getMainRangeReader().isInitialized()) initializeRangeReaders(); - if (enable_virtual_row) + auto res = algorithm->readFromTask(*task, block_size_params); + + if (res.row_count) { - /// Turn on virtual row just once. - enable_virtual_row = false; - - const auto & primary_key = getPrimaryKey(); - - MergeTreeReadTask::BlockAndProgress res; - res.row_count = 1; - /// Reorder the columns according to result_header Columns ordered_columns; ordered_columns.reserve(result_header.columns()); - for (size_t i = 0, j = 0; i < result_header.columns(); ++i) + for (size_t i = 0; i < result_header.columns(); ++i) { - const ColumnWithTypeAndName & type_and_name = result_header.getByPosition(i); - ColumnPtr current_column = type_and_name.type->createColumn(); - - if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) - { - auto column = current_column->cloneEmpty(); - column->insert((*(*index)[j])[mark_range_begin]); - ordered_columns.push_back(std::move(column)); - ++j; - } - else - ordered_columns.push_back(current_column->cloneResized(1)); + auto name = result_header.getByPosition(i).name; + ordered_columns.push_back(res.block.getByName(name).column); } auto chunk = Chunk(ordered_columns, res.row_count); - chunk.getChunkInfos().add(std::make_shared( - add_part_level ? task->getInfo().data_part->info.level : 0, true)); + if (add_part_level) + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); return ChunkAndProgress{ .chunk = std::move(chunk), @@ -175,33 +159,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() } else { - auto res = algorithm->readFromTask(*task, block_size_params); - - if (res.row_count) - { - /// Reorder the columns according to result_header - Columns ordered_columns; - ordered_columns.reserve(result_header.columns()); - for (size_t i = 0; i < result_header.columns(); ++i) - { - auto name = result_header.getByPosition(i).name; - ordered_columns.push_back(res.block.getByName(name).column); - } - - auto chunk = Chunk(ordered_columns, res.row_count); - if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); - - return ChunkAndProgress{ - .chunk = std::move(chunk), - .num_read_rows = res.num_read_rows, - .num_read_bytes = res.num_read_bytes, - .is_finished = false}; - } - else - { - return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; - } + return {Chunk(), res.num_read_rows, res.num_read_bytes, false}; } } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index d790d1e266f..6dcb6ca73d2 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -60,12 +60,6 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void addVirtualRowToChunk(const IMergeTreeDataPart::Index & index_, size_t mark_range_begin_) - { - index = index_; - mark_range_begin = mark_range_begin_; - } - void enableVirtualRow() { enable_virtual_row = true; } const KeyDescription & getPrimaryKey() 
const { return storage_snapshot->metadata->primary_key; } @@ -100,8 +94,6 @@ private: bool enable_virtual_row = false; /// PK index used in virtual row. IMergeTreeDataPart::Index index; - /// The first range that might contain the candidate, used in virtual row. - size_t mark_range_begin; LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index 443f6d3ae93..44e61566deb 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,6 +12,7 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 \ No newline at end of file diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index b4b1554a7d4..3c3a9cf532e 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -2,25 +2,13 @@ 1 2 3 -16386 +16384 ======== 16385 16386 16387 16388 -24578 -======== -0 -1 -2 -3 -16386 -======== -16385 -16386 -16387 -16388 -24578 +24576 ======== 1 2 1 2 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 5bae739bc51..688e427d19d 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -39,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'preliminary merge, no filter'; +log_comment = 'no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'preliminary merge, no filter' +AND log_comment = 'no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -63,68 +63,18 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'preliminary merge with filter'; +log_comment = 'with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'preliminary merge with filter' +AND log_comment = 'with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; -SELECT '========'; --- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), --- both chunks come from the same part. 
-SELECT x -FROM t -ORDER BY x ASC -LIMIT 4 -SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -max_threads = 1, -optimize_read_in_order = 1, -log_comment = 'no preliminary merge, no filter'; - -SYSTEM FLUSH LOGS; - -SELECT read_rows -FROM system.query_log -WHERE current_database = currentDatabase() -AND log_comment = 'no preliminary merge, no filter' -AND type = 'QueryFinish' -ORDER BY query_start_time DESC -LIMIT 1; - -SELECT '========'; --- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), --- all chunks come from the same part. -SELECT k -FROM t -WHERE k > 8192 * 2 -ORDER BY x ASC -LIMIT 4 -SETTINGS max_block_size = 8192, -read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -read_in_order_use_buffering = false, --avoid buffer -max_threads = 1, -optimize_read_in_order = 1, -log_comment = 'no preliminary merge, with filter'; - -SYSTEM FLUSH LOGS; - -SELECT read_rows -FROM system.query_log -WHERE current_database = currentDatabase() -AND log_comment = 'no preliminary merge, with filter' -AND type = 'QueryFinish' -ORDER BY query_start_time DESC -LIMIT 1; - -DROP TABLE t; - SELECT '========'; -- from 02149_read_in_order_fixed_prefix DROP TABLE IF EXISTS fixed_prefix; From 503e7490d439e2f0969ef7b09cc2134af154fa1a Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sun, 8 Sep 2024 00:55:10 +0000 Subject: [PATCH 028/171] tidy --- src/Processors/Transforms/VirtualRowTransform.cpp | 2 +- src/Storages/MergeTree/MergeTreeReadTask.cpp | 6 ------ src/Storages/MergeTree/MergeTreeReadTask.h | 3 --- tests/queries/0_stateless/02346_fulltext_index_search.sql | 4 ++-- 4 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 55b442cefb6..9b904fc4ae2 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -98,7 +98,7 @@ void VirtualRowTransform::work() ColumnPtr current_column = type_and_name.type->createColumn(); // ordered_columns.push_back(current_column->cloneResized(1)); - if (j < index->size() && type_and_name.name == primary_key.column_names[j] + if (j < index->size() && type_and_name.name == primary_key.column_names[j] && type_and_name.type == primary_key.data_types[j]) { auto column = current_column->cloneEmpty(); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.cpp b/src/Storages/MergeTree/MergeTreeReadTask.cpp index 491aa26343d..177a325ea5a 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.cpp +++ b/src/Storages/MergeTree/MergeTreeReadTask.cpp @@ -161,12 +161,6 @@ MergeTreeReadTask::BlockAndProgress MergeTreeReadTask::read(const BlockSizeParam auto read_result = range_readers.main.read(rows_to_read, mark_ranges); - if (add_virtual_row) - { - /// Now we have the virtual row, which is at most once for each part. - add_virtual_row = false; - } - /// All rows were filtered. Repeat. if (read_result.num_rows == 0) read_result.columns.clear(); diff --git a/src/Storages/MergeTree/MergeTreeReadTask.h b/src/Storages/MergeTree/MergeTreeReadTask.h index a44d4e4fabd..e90a07e0b55 100644 --- a/src/Storages/MergeTree/MergeTreeReadTask.h +++ b/src/Storages/MergeTree/MergeTreeReadTask.h @@ -162,9 +162,6 @@ private: /// Used to satistfy preferred_block_size_bytes limitation MergeTreeBlockSizePredictorPtr size_predictor; - - /// If true, add once, and then set false. 
- bool add_virtual_row = false; }; using MergeTreeReadTaskPtr = std::unique_ptr; diff --git a/tests/queries/0_stateless/02346_fulltext_index_search.sql b/tests/queries/0_stateless/02346_fulltext_index_search.sql index f0505f63124..80f49790201 100644 --- a/tests/queries/0_stateless/02346_fulltext_index_search.sql +++ b/tests/queries/0_stateless/02346_fulltext_index_search.sql @@ -195,14 +195,14 @@ INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; -- search full_text index -SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k SETTINGS optimize_read_in_order = 0; +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; -- check the query only read 3 granules (6 rows total; each granule has 2 rows) SYSTEM FLUSH LOGS; SELECT read_rows==6 from system.query_log WHERE query_kind ='Select' AND current_database = currentDatabase() - AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k SETTINGS optimize_read_in_order = 0;') + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') AND type='QueryFinish' AND result_rows==3 LIMIT 1; From b232205b4407e185b3a17bc261c9fd977d0c0e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 8 Sep 2024 22:22:06 +0300 Subject: [PATCH 029/171] Fix unexpected part path check --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 68aa370959c..fb2bc2fada7 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -582,7 +582,7 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl if (deduplicate && deduplicated) { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; - if (!startsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_")) + if (!endsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_" + part->name + "/")) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); From 26e74bc9eec77da69c727fa2946041257bc877ce Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 9 Sep 2024 14:29:41 +0000 Subject: [PATCH 030/171] move virtual row flag to class member --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 11 +++++------ src/Processors/QueryPlan/ReadFromMergeTree.h | 4 +++- src/Storages/MergeTree/MergeTreeSequentialSource.cpp | 1 - .../01551_mergetree_read_in_order_spread.reference | 10 ++++++---- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 264d4cd095d..599a33f1777 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -513,8 +513,7 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit, - bool need_virtual_row) + UInt64 read_limit) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
@@ -622,7 +621,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (need_virtual_row) + if (enable_virtual_row) { pipe.addSimpleTransform([&](const Block & header) { @@ -1061,10 +1060,10 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (auto && item : splitted_parts_and_ranges) { - /// need_virtual_row = true means a MergingSortedTransform should occur. + /// enable_virtual_row = true means a MergingSortedTransform should occur. /// If so, adding a virtual row might speedup in the case of multiple parts. - bool need_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, need_virtual_row)); + enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 20c9cfafc7e..7a0b22d87c4 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -239,7 +239,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool need_virtual_row = false); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); @@ -269,6 +269,8 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; + + bool enable_virtual_row = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index edeac12a1df..e799dc0b20e 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -14,7 +14,6 @@ #include #include #include - #include #include diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index 44e61566deb..e83c2e906d1 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,7 +12,9 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 \ No newline at end of file + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + 
VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 From 4a67c68d0bc6ef337a011c044ac56899265f3b0e Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 10 Sep 2024 01:31:01 +0000 Subject: [PATCH 031/171] only focus on the direct mergesort case --- .../QueryPlan/ReadFromMergeTree.cpp | 6 +- src/Processors/QueryPlan/SortingStep.cpp | 64 ---------- src/Processors/QueryPlan/SortingStep.h | 2 - src/QueryPipeline/QueryPipelineBuilder.h | 2 - .../MergeTree/MergeTreeSelectProcessor.cpp | 2 - .../MergeTree/MergeTreeSelectProcessor.h | 12 -- src/Storages/MergeTree/MergeTreeSource.h | 2 - .../02521_aggregation_by_partitions.reference | 112 +++++++++++++----- 8 files changed, 83 insertions(+), 119 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 599a33f1777..a5c7af01d55 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -392,7 +392,7 @@ Pipe ReadFromMergeTree::readFromPoolParallelReplicas( auto algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size_copy, reader_settings); auto source = std::make_shared(std::move(processor), data.getLogName()); @@ -491,7 +491,7 @@ Pipe ReadFromMergeTree::readFromPool( auto algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size_copy, reader_settings); auto source = std::make_shared(std::move(processor), data.getLogName()); @@ -610,7 +610,7 @@ Pipe ReadFromMergeTree::readInOrder( algorithm = std::make_unique(i); auto processor = std::make_unique( - pool, std::move(algorithm), storage_snapshot, prewhere_info, + pool, std::move(algorithm), prewhere_info, actions_settings, block_size, reader_settings); processor->addPartLevelToChunk(isQueryWithFinal()); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index aa909bef8a9..48fad9f5fdb 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -14,9 +13,6 @@ #include #include -#include -#include -#include #include @@ -247,69 +243,11 @@ void SortingStep::finishSorting( }); } -void SortingStep::enableVirtualRow(const QueryPipelineBuilder & pipeline) const -{ - /// We check every step of this pipeline, to make sure virtual row can work correctly. - /// Currently ExpressionTransform is supported, should add other processors if possible. - const auto& pipe = pipeline.getPipe(); - bool enable_virtual_row = true; - std::vector> merge_tree_sources; - for (const auto & processor : pipe.getProcessors()) - { - if (auto merge_tree_source = std::dynamic_pointer_cast(processor)) - { - merge_tree_sources.push_back(merge_tree_source); - } - else if (!std::dynamic_pointer_cast(processor) && !std::dynamic_pointer_cast(processor)) - { - enable_virtual_row = false; - break; - } - } - - /// If everything is okay, enable virtual row in MergeTreeSelectProcessor. 
- if (enable_virtual_row && merge_tree_sources.size() >= 2) - { - auto extractNameAfterDot = [](const String & name) - { - size_t pos = name.find_last_of('.'); - return (pos != String::npos) ? name.substr(pos + 1) : name; - }; - - const ColumnWithTypeAndName & type_and_name = pipeline.getHeader().getByPosition(0); - String column_name = extractNameAfterDot(type_and_name.name); - for (const auto & merge_tree_source : merge_tree_sources) - { - const auto & merge_tree_select_processor = merge_tree_source->getProcessor(); - - /// Check pk is not func based, as we only check type and name in filling in primary key of virtual row. - const auto & primary_key = merge_tree_select_processor->getPrimaryKey(); - const auto & actions = primary_key.expression->getActions(); - bool is_okay = true; - for (const auto & action : actions) - { - if (action.node->type != ActionsDAG::ActionType::INPUT) - { - is_okay = false; - break; - } - } - - /// We have to check further in the case of fixed prefix, for example, - /// primary key ab, query SELECT a, b FROM t WHERE a = 1 ORDER BY b, - /// merge sort would sort based on b, leading to wrong result in comparison. - if (is_okay && primary_key.column_names[0] == column_name && primary_key.data_types[0] == type_and_name.type) - merge_tree_select_processor->enableVirtualRow(); - } - } -} - void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescription & result_sort_desc, const UInt64 limit_) { /// If there are several streams, then we merge them into one if (pipeline.getNumStreams() > 1) { - if (use_buffering && sort_settings.read_in_order_use_buffering) { pipeline.addSimpleTransform([&](const Block & header) @@ -318,8 +256,6 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr }); } - enableVirtualRow(pipeline); - auto transform = std::make_shared( pipeline.getHeader(), pipeline.getNumStreams(), diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index e6f3a07b907..b4a49394a13 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -118,8 +118,6 @@ private: UInt64 limit_, bool skip_partial_sort = false); - void enableVirtualRow(const QueryPipelineBuilder & pipeline) const; - Type type; SortDescription prefix_description; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 22df1d8ea48..a9e5b1535c0 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,8 +197,6 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } - const Pipe& getPipe() const { return pipe; } - /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index ca368a94bd4..85f545d2a51 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -26,14 +26,12 @@ namespace ErrorCodes MergeTreeSelectProcessor::MergeTreeSelectProcessor( MergeTreeReadPoolPtr pool_, MergeTreeSelectAlgorithmPtr algorithm_, - const StorageSnapshotPtr & storage_snapshot_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & actions_settings_, const MergeTreeReadTask::BlockSizeParams & block_size_params_, const MergeTreeReaderSettings & reader_settings_) : pool(std::move(pool_)) , algorithm(std::move(algorithm_)) - , storage_snapshot(storage_snapshot_) , prewhere_info(prewhere_info_) , actions_settings(actions_settings_) , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps)) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 6dcb6ca73d2..7a9cebbcb2e 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -36,7 +36,6 @@ public: MergeTreeSelectProcessor( MergeTreeReadPoolPtr pool_, MergeTreeSelectAlgorithmPtr algorithm_, - const StorageSnapshotPtr & storage_snapshot_, const PrewhereInfoPtr & prewhere_info_, const ExpressionActionsSettings & actions_settings_, const MergeTreeReadTask::BlockSizeParams & block_size_params_, @@ -60,17 +59,12 @@ public: void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } - void enableVirtualRow() { enable_virtual_row = true; } - - const KeyDescription & getPrimaryKey() const { return storage_snapshot->metadata->primary_key; } - private: /// Sets up range readers corresponding to data readers void initializeRangeReaders(); const MergeTreeReadPoolPtr pool; const MergeTreeSelectAlgorithmPtr algorithm; - const StorageSnapshotPtr storage_snapshot; const PrewhereInfoPtr prewhere_info; const ExpressionActionsSettings actions_settings; @@ -89,12 +83,6 @@ private: /// Should we add part level to produced chunk. Part level is useful for next steps if query has FINAL bool add_part_level = false; - /// Should we add a virtual row as the single first chunk. - /// Virtual row is useful for read-in-order optimization when multiple parts exist. - bool enable_virtual_row = false; - /// PK index used in virtual row. 
- IMergeTreeDataPart::Index index; - LoggerPtr log = getLogger("MergeTreeSelectProcessor"); std::atomic is_cancelled{false}; }; diff --git a/src/Storages/MergeTree/MergeTreeSource.h b/src/Storages/MergeTree/MergeTreeSource.h index 287f2f5ac63..7506af4f9b8 100644 --- a/src/Storages/MergeTree/MergeTreeSource.h +++ b/src/Storages/MergeTree/MergeTreeSource.h @@ -19,8 +19,6 @@ public: Status prepare() override; - const MergeTreeSelectProcessorPtr& getProcessor() const { return processor; } - #if defined(OS_LINUX) int schedule() override; #endif diff --git a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference index 87b2d5c3430..addc36421c3 100644 --- a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference +++ b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference @@ -160,52 +160,100 @@ ExpressionTransform × 16 (ReadFromMergeTree) MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 
2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 1000000 Skip merging: 1 Skip merging: 1 From 79e1ce1d4bd1e032b7890f27386dbf9c043e49c0 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 12 Sep 2024 23:54:16 +0000 Subject: [PATCH 032/171] fix --- src/Processors/QueryPlan/ReadFromMergeTree.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index a09d31155dc..b43217db598 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -282,12 +282,8 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; -<<<<<<< LessReadInOrder - bool enable_virtual_row = false; -======= std::optional number_of_current_replica; ->>>>>>> master }; } From 
084c8115fe55440d363999dd498f77c02306c467 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 13 Sep 2024 21:09:03 +0000 Subject: [PATCH 033/171] support non-preliminary merge --- .../Optimizations/optimizeReadInOrder.cpp | 2 + .../QueryPlan/ReadFromMergeTree.cpp | 8 ++- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 + ...er_optimization_with_virtual_row.reference | 12 ++++ ...in_order_optimization_with_virtual_row.sql | 58 +++++++++++++++++-- 5 files changed, 75 insertions(+), 7 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index ac7fcdcf83f..c41122c26b2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -820,6 +820,8 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n bool can_read = reading->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); if (!can_read) return nullptr; + + reading->enableVirtualRow(); } return order_info; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 43b034b476a..7a297f6db3b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1099,9 +1099,11 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( for (auto && item : splitted_parts_and_ranges) { - /// enable_virtual_row = true means a MergingSortedTransform should occur. - /// If so, adding a virtual row might speedup in the case of multiple parts. - enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + /// If not enabled before, try to enable it when conditions meet as in the following section of preliminary merge, + /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. 
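+        /// A virtual row only pays off when several sorted streams are merged,
+        /// hence the item.size() > 1 requirement below.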
+ if (!enable_virtual_row) + enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index b43217db598..ccb56c3f31a 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -210,6 +210,8 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; + void enableVirtualRow() { enable_virtual_row = true; } + private: int getSortDirection() const { diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 3c3a9cf532e..7106ddc157c 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -10,6 +10,18 @@ 16388 24576 ======== +0 +1 +2 +3 +16384 +======== +16385 +16386 +16387 +16388 +24578 +======== 1 2 1 2 1 3 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 688e427d19d..5bae739bc51 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -39,14 +39,14 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'no filter'; +log_comment = 'preliminary merge, no filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'no filter' +AND log_comment = 'preliminary merge, no filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC limit 1; @@ -63,18 +63,68 @@ SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 0, --force preliminary merge max_threads = 1, optimize_read_in_order = 1, -log_comment = 'with filter'; +log_comment = 'preliminary merge with filter'; SYSTEM FLUSH LOGS; SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() -AND log_comment = 'with filter' +AND log_comment = 'preliminary merge with filter' AND type = 'QueryFinish' ORDER BY query_start_time DESC LIMIT 1; +SELECT '========'; +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, no filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +SELECT '========'; +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +read_in_order_use_buffering = false, --avoid buffer +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, with filter'; + +SYSTEM FLUSH LOGS; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +DROP TABLE t; + SELECT '========'; -- from 02149_read_in_order_fixed_prefix DROP TABLE IF EXISTS fixed_prefix; From 2aba6f5b36d959c65d6daaaacb855a7ccf9b26b2 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 13 Sep 2024 21:44:03 +0000 Subject: [PATCH 034/171] avoid conflict with buffering --- .../QueryPlan/Optimizations/optimizeReadInOrder.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index c41122c26b2..29453acca41 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -821,7 +821,10 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - reading->enableVirtualRow(); + bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; + /// Avoid conflict with buffering. + if (!use_buffering) + reading->enableVirtualRow(); } return order_info; From c8d6c177688783b25eb8f88e1f891e9839dac8a7 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 14 Sep 2024 02:22:11 +0000 Subject: [PATCH 035/171] fix --- .../Optimizations/optimizeReadInOrder.cpp | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 17 ++++++++++++----- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- .../02149_read_in_order_fixed_prefix.reference | 18 +++++++++++++----- ...der_optimization_with_virtual_row.reference | 2 +- 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 29453acca41..b302534e2f4 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -822,7 +822,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n return nullptr; bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; - /// Avoid conflict with buffering. + /// Avoid conflict with buffering. if (!use_buffering) reading->enableVirtualRow(); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 7a297f6db3b..ac5db8277c2 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -549,7 +549,8 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit) + UInt64 read_limit, + bool enable_current_virtual_row) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
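     /// (enable_current_virtual_row is decided per pipe by the caller below; only
     /// pipes that feed a MergingSortedTransform benefit from the virtual row.)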
@@ -660,7 +661,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_virtual_row) + if (enable_current_virtual_row) { pipe.addSimpleTransform([&](const Block & header) { @@ -1097,14 +1098,20 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } + /// If enabled in the optimization stage, check whether there are more than one branch. + if (enable_virtual_row) + enable_virtual_row = splitted_parts_and_ranges.size() > 1 + || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1); + for (auto && item : splitted_parts_and_ranges) { - /// If not enabled before, try to enable it when conditions meet as in the following section of preliminary merge, + /// If not enabled before, try to enable it when conditions meet, as in the following section of preliminary merge, /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. + bool enable_current_virtual_row = enable_virtual_row; if (!enable_virtual_row) - enable_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; + enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, enable_current_virtual_row)); } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index ccb56c3f31a..7c0bbdc8dec 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -254,7 +254,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row = false); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index d608364e01b..f7966645e8a 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -14,7 +14,10 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -32,9 +35,11 @@ ExpressionTransform ExpressionTransform × 2 (ReadFromMergeTree) ReverseTransform - 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 - ReverseTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + ReverseTransform + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -51,7 +56,10 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 7106ddc157c..ef9f06ec21a 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -20,7 +20,7 @@ 16386 16387 16388 -24578 +24576 ======== 1 2 1 2 From 105639c0878e896b59bea098a51f4354cf831846 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 14 Sep 2024 20:41:36 +0000 Subject: [PATCH 036/171] disable pk function --- .../QueryPlan/ReadFromMergeTree.cpp | 14 ++++++++++++- ...1_mergetree_read_in_order_spread.reference | 9 +++------ ...in_order_optimization_with_virtual_row.sql | 20 +++++++++++++++---- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index ac5db8277c2..02d10dcb46b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1098,6 +1098,17 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } + bool primary_key_type_supports_virtual_row = true; + const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); + for (const auto & action : actions) + { + if (action.node->type != ActionsDAG::ActionType::INPUT) + { + primary_key_type_supports_virtual_row = false; + break; + } + } + /// If enabled in the optimization stage, check whether there are more than one branch. 
if (enable_virtual_row) enable_virtual_row = splitted_parts_and_ranges.size() > 1 @@ -1111,7 +1122,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( if (!enable_virtual_row) enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, enable_current_virtual_row)); + pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, + enable_current_virtual_row && primary_key_type_supports_virtual_row)); } } diff --git a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference index e83c2e906d1..443f6d3ae93 100644 --- a/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference +++ b/tests/queries/0_stateless/01551_mergetree_read_in_order_spread.reference @@ -12,9 +12,6 @@ ExpressionTransform × 3 MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - ExpressionTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + ExpressionTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 5bae739bc51..159f38903e3 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -138,7 +138,14 @@ SYSTEM STOP MERGES fixed_prefix; INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); -SELECT a, b FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1; +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE fixed_prefix; @@ -160,8 +167,13 @@ INSERT INTO function_pk values(1,1); INSERT INTO function_pk values(1,3); INSERT INTO function_pk values(1,2); --- TODO: handle preliminary merge for this case, temporarily disable it -SET optimize_read_in_order = 0; -SELECT * FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1; +SELECT * +FROM function_pk +ORDER BY (A,-B) ASC +limit 3 +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE function_pk; From 45471d841bd906cbd7c4b4e88581c049e759d9f1 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 16 Sep 2024 17:41:38 +0000 Subject: [PATCH 037/171] remove default value of enable_current_virtual_row --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 4 ++-- src/Processors/QueryPlan/ReadFromMergeTree.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 
02d10dcb46b..fb69bdd5aaa 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -716,7 +716,7 @@ Pipe ReadFromMergeTree::read( if (read_type == ReadType::Default && (max_streams > 1 || checkAllPartsOnRemoteFS(parts_with_range))) return readFromPool(std::move(parts_with_range), std::move(required_columns), std::move(pool_settings)); - auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0); + auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0, false); /// Use ConcatProcessor to concat sources together. /// It is needed to read in parts order (and so in PK order) if single thread is used. @@ -1025,7 +1025,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( /// For parallel replicas the split will be performed on the initiator side. if (is_parallel_reading_from_replicas) { - pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit)); + pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit, false)); } else { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 7c0bbdc8dec..aa1b9dcfdcb 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -254,7 +254,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row = false); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); From 6af5fe48ba2f7d22447056fd148665f350830fe4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Mon, 16 Sep 2024 19:43:00 +0000 Subject: [PATCH 038/171] handle the case first prefix fixed --- .../QueryPlan/Optimizations/optimizeReadInOrder.cpp | 10 +++++++--- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- src/Storages/ReadInOrderOptimizer.cpp | 2 +- src/Storages/SelectQueryInfo.h | 11 ++++++++++- ...d_in_order_optimization_with_virtual_row.reference | 6 ++++++ ...31_read_in_order_optimization_with_virtual_row.sql | 9 +++++++++ 6 files changed, 34 insertions(+), 6 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index b302534e2f4..909645098b1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -370,6 +370,7 @@ InputOrderInfoPtr buildInputOrderInfo( int read_direction = 0; size_t next_description_column = 0; size_t next_sort_key = 0; + bool first_prefix_fixed = false; while (next_description_column < description.size() && 
next_sort_key < sorting_key.column_names.size()) { @@ -447,6 +448,9 @@ InputOrderInfoPtr buildInputOrderInfo( } else if (fixed_key_columns.contains(sort_column_node)) { + if (next_sort_key == 0) + first_prefix_fixed = true; + //std::cerr << "+++++++++ Found fixed key by match" << std::endl; ++next_sort_key; } @@ -481,7 +485,7 @@ InputOrderInfoPtr buildInputOrderInfo( if (read_direction == 0 || order_key_prefix_descr.empty()) return nullptr; - return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit); + return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit, first_prefix_fixed); } /// We really need three different sort descriptions here. @@ -685,7 +689,7 @@ AggregationInputOrder buildInputOrderInfo( for (const auto & key : not_matched_group_by_keys) group_by_sort_description.emplace_back(SortColumnDescription(std::string(key))); - auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0); + auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0, false); return { std::move(input_order), std::move(sort_description_for_merging), std::move(group_by_sort_description) }; } @@ -823,7 +827,7 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; /// Avoid conflict with buffering. - if (!use_buffering) + if (!use_buffering && !order_info->first_prefix_fixed) reading->enableVirtualRow(); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index fb69bdd5aaa..b507172597e 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1808,7 +1808,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, if (direction != 1 && query_info.isFinal()) return false; - query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit); + query_info.input_order_info = std::make_shared(SortDescription{}, prefix_size, direction, read_limit, false); reader_settings.read_in_order = true; /// In case or read-in-order, don't create too many reading streams. diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index 9c8c4c2fe79..ea7ea218feb 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -249,7 +249,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrderImpl( if (sort_description_for_merging.empty()) return {}; - return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit); + return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit, false); } InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder( diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 7ad6a733c6f..bf1229f7a3a 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -119,13 +119,22 @@ struct InputOrderInfo const int direction; const UInt64 limit; + /** For virtual row optimization only + * for example, when pk is (a,b), a = 1, order by b, virtual row should be + * disabled in the following case: + * 1st part (0, 100), (1, 2), (1, 3), (1, 4) + * 2nd part (0, 100), (1, 2), (1, 3), (1, 4). 
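+     * In both parts the virtual row would be built from the index value (0, 100),
+     * whose b component does not bound the b values of the rows matching a = 1,
+     * so the merge cursors would be misled about each part's real minimum.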
+ */ + bool first_prefix_fixed; + InputOrderInfo( const SortDescription & sort_description_for_merging_, size_t used_prefix_of_sorting_key_size_, - int direction_, UInt64 limit_) + int direction_, UInt64 limit_, bool first_prefix_fixed_) : sort_description_for_merging(sort_description_for_merging_) , used_prefix_of_sorting_key_size(used_prefix_of_sorting_key_size_) , direction(direction_), limit(limit_) + , first_prefix_fixed(first_prefix_fixed_) { } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index ef9f06ec21a..08dabf3ee06 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -28,6 +28,12 @@ 1 3 1 4 1 4 +1 2 +1 2 +1 3 +1 3 +1 4 +1 4 ======== 1 3 1 2 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 159f38903e3..b26f3a48eef 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -147,6 +147,15 @@ read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +read_in_order_use_buffering = false, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge + DROP TABLE fixed_prefix; SELECT '========'; From 81a7927b8a24d6e7686ed6bf9bd6f7452428b492 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 17 Sep 2024 15:06:21 +0000 Subject: [PATCH 039/171] handle virtual row in BufferChunksTransform --- src/Processors/QueryPlan/BufferChunksTransform.cpp | 14 ++++++++++++++ .../Optimizations/optimizeReadInOrder.cpp | 4 +--- ...read_in_order_optimization_with_virtual_row.sql | 4 ---- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 3601a68d36e..0d9cee28619 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { @@ -49,13 +50,26 @@ IProcessor::Status BufferChunksTransform::prepare() else if (input.hasData()) { auto chunk = pullChunk(); + bool virtual_row = getVirtualRowFromChunk(chunk); output.push(std::move(chunk)); + if (virtual_row) + { + input.setNotNeeded(); + return Status::PortFull; + } } } if (input.hasData() && (num_buffered_rows < max_rows_to_buffer || num_buffered_bytes < max_bytes_to_buffer)) { auto chunk = pullChunk(); + bool virtual_row = getVirtualRowFromChunk(chunk); + if (virtual_row) + { + output.push(std::move(chunk)); + input.setNotNeeded(); + return Status::PortFull; + } num_buffered_rows += chunk.getNumRows(); num_buffered_bytes += chunk.bytes(); chunks.push(std::move(chunk)); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 909645098b1..e7468a3a3f2 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -825,9 +825,7 @@ InputOrderInfoPtr 
buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - bool use_buffering = (order_info->limit == 0) && sorting.getSettings().read_in_order_use_buffering; - /// Avoid conflict with buffering. - if (!use_buffering && !order_info->first_prefix_fixed) + if (!order_info->first_prefix_fixed) reading->enableVirtualRow(); } diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index b26f3a48eef..7e3af6c057a 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -108,7 +108,6 @@ ORDER BY x ASC LIMIT 4 SETTINGS max_block_size = 8192, read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge -read_in_order_use_buffering = false, --avoid buffer max_threads = 1, optimize_read_in_order = 1, log_comment = 'no preliminary merge, with filter'; @@ -143,7 +142,6 @@ FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge @@ -152,7 +150,6 @@ FROM fixed_prefix WHERE a = 1 ORDER BY b SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge @@ -181,7 +178,6 @@ FROM function_pk ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1, -read_in_order_use_buffering = false, optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge From a48bd922d9122aa18a4cf1fe196a3418f798c7a4 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Tue, 17 Sep 2024 20:27:59 +0000 Subject: [PATCH 040/171] fix limit in BufferChunksTransform with virtual row --- src/Processors/QueryPlan/BufferChunksTransform.cpp | 14 ++++++++------ src/Processors/QueryPlan/BufferChunksTransform.h | 2 +- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- .../02149_read_in_order_fixed_prefix.reference | 8 +++----- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 0d9cee28619..47e2c2ba0d5 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -49,8 +49,8 @@ IProcessor::Status BufferChunksTransform::prepare() } else if (input.hasData()) { - auto chunk = pullChunk(); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row; + auto chunk = pullChunk(virtual_row); output.push(std::move(chunk)); if (virtual_row) { @@ -62,8 +62,8 @@ IProcessor::Status BufferChunksTransform::prepare() if (input.hasData() && (num_buffered_rows < max_rows_to_buffer || num_buffered_bytes < max_bytes_to_buffer)) { - auto chunk = pullChunk(); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row; + auto chunk = pullChunk(virtual_row); if (virtual_row) { output.push(std::move(chunk)); @@ -85,10 +85,12 @@ IProcessor::Status BufferChunksTransform::prepare() return Status::NeedData; } -Chunk BufferChunksTransform::pullChunk() +Chunk BufferChunksTransform::pullChunk(bool & virtual_row) { auto chunk = input.pull(); - num_processed_rows += chunk.getNumRows(); + virtual_row = getVirtualRowFromChunk(chunk); + if (!virtual_row) + num_processed_rows += chunk.getNumRows(); if (limit && 
num_processed_rows >= limit) input.close(); diff --git a/src/Processors/QueryPlan/BufferChunksTransform.h b/src/Processors/QueryPlan/BufferChunksTransform.h index 752f9910734..fce79eeaef3 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.h +++ b/src/Processors/QueryPlan/BufferChunksTransform.h @@ -24,7 +24,7 @@ public: String getName() const override { return "BufferChunks"; } private: - Chunk pullChunk(); + Chunk pullChunk(bool & virtual_row); InputPort & input; OutputPort & output; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b507172597e..45dcb4616b1 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -661,7 +661,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_current_virtual_row) + if (enable_current_virtual_row && (read_type == ReadType::InOrder)) { pipe.addSimpleTransform([&](const Block & header) { diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index f7966645e8a..31462988c2d 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -35,11 +35,9 @@ ExpressionTransform ExpressionTransform × 2 (ReadFromMergeTree) ReverseTransform - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 - ReverseTransform - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 + ReverseTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InReverseOrder) 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 From fd021f658df9ecef6804da3885067061f842e5b2 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 18 Sep 2024 20:01:11 +0000 Subject: [PATCH 041/171] check steps before mergesort --- .../Optimizations/optimizeReadInOrder.cpp | 17 ++++++++-- .../QueryPlan/ReadFromMergeTree.cpp | 17 +++++----- src/Processors/QueryPlan/ReadFromMergeTree.h | 14 ++++++-- ...er_optimization_with_virtual_row.reference | 2 ++ ...in_order_optimization_with_virtual_row.sql | 32 +++++++++++++++++++ 5 files changed, 70 insertions(+), 12 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index e7468a3a3f2..d3ecb3cac6b 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -94,6 +94,17 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node, StepStack & bac return nullptr; } +static bool checkVirtualRowSupport(const StepStack & backward_path) +{ + for (size_t i = 0; i < backward_path.size() - 1; i++) + { + IQueryPlanStep * step = backward_path[i]; + if (!typeid_cast(step) && !typeid_cast(step)) + return false; + } + return true; +} + void updateStepsDataStreams(StepStack & steps_to_update) { /// update data stream's sorting properties for found transforms @@ -825,8 +836,10 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (!can_read) return nullptr; - if (!order_info->first_prefix_fixed) - reading->enableVirtualRow(); + if (!checkVirtualRowSupport(backward_path)) + reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::No); + else if 
(!order_info->first_prefix_fixed) + reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::Possible); } return order_info; diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 45dcb4616b1..2ac663e0680 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1109,17 +1109,18 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( } } - /// If enabled in the optimization stage, check whether there are more than one branch. - if (enable_virtual_row) - enable_virtual_row = splitted_parts_and_ranges.size() > 1 - || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1); + /// If possible in the optimization stage, check whether there are more than one branch. + if (virtual_row_status == VirtualRowStatus::Possible) + virtual_row_status = splitted_parts_and_ranges.size() > 1 + || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) + ? VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; for (auto && item : splitted_parts_and_ranges) { - /// If not enabled before, try to enable it when conditions meet, as in the following section of preliminary merge, - /// only ExpressionTransform is added between MergingSortedTransform and readFromMergeTree. - bool enable_current_virtual_row = enable_virtual_row; - if (!enable_virtual_row) + bool enable_current_virtual_row = false; + if (virtual_row_status == VirtualRowStatus::Yes) + enable_current_virtual_row = true; + else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit, diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index aa1b9dcfdcb..767fcf3b0f8 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -108,6 +108,14 @@ public: using AnalysisResultPtr = std::shared_ptr; + enum class VirtualRowStatus + { + NoConsiderInLogicalPlan, + Possible, + No, + Yes, + }; + ReadFromMergeTree( MergeTreeData::DataPartsVector parts_, MergeTreeData::MutationsSnapshotPtr mutations_snapshot_, @@ -210,7 +218,7 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; - void enableVirtualRow() { enable_virtual_row = true; } + void setVirtualRowStatus(VirtualRowStatus virtual_row_status_) { virtual_row_status = virtual_row_status_; } private: int getSortDirection() const @@ -284,7 +292,9 @@ private: std::optional read_task_callback; bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; - bool enable_virtual_row = false; + + VirtualRowStatus virtual_row_status = VirtualRowStatus::NoConsiderInLogicalPlan; + std::optional number_of_current_replica; }; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference index 08dabf3ee06..499ac19d374 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.reference @@ -38,3 +38,5 @@ 1 3 1 2 1 1 +-- test distinct ---- +0 diff --git 
a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 7e3af6c057a..4c7bc5d17c7 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -182,3 +182,35 @@ optimize_read_in_order = 1, read_in_order_two_level_merge_threshold = 0; --force preliminary merge DROP TABLE function_pk; + +-- modified from 02317_distinct_in_order_optimization +SELECT '-- test distinct ----'; + +DROP TABLE IF EXISTS distinct_in_order SYNC; + +CREATE TABLE distinct_in_order +( + `a` int, + `b` int, + `c` int +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SYSTEM STOP MERGES distinct_in_order; + +INSERT INTO distinct_in_order SELECT + number % number, + number % 5, + number % 10 +FROM numbers(1, 1000000); + +SELECT DISTINCT a +FROM distinct_in_order +ORDER BY a ASC +SETTINGS read_in_order_two_level_merge_threshold = 0, +optimize_read_in_order = 1, +max_threads = 2; + +DROP TABLE distinct_in_order; From 926e28e35cb1d17d0bb66c06b613671d3eeeeac2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Thu, 19 Sep 2024 02:52:23 +0300 Subject: [PATCH 042/171] Rollback part rename if it was deduplicated --- .../MergeTree/ReplicatedMergeTreeSink.cpp | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index fb2bc2fada7..98c46edda25 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -583,7 +583,7 @@ bool ReplicatedMergeTreeSinkImpl::writeExistingPart(MergeTreeData::Mutabl { error = ErrorCodes::INSERT_WAS_DEDUPLICATED; if (!endsWith(part->getDataPartStorage().getRelativePath(), "detached/attaching_" + part->name + "/")) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a part: {}", part->getDataPartStorage().getRelativePath()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected relative path for a deduplicated part: {}", part->getDataPartStorage().getRelativePath()); fs::path new_relative_path = fs::path("detached") / part->getNewName(part->info); part->renameTo(new_relative_path, false); } @@ -1013,16 +1013,6 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: } } - transaction.rollback(); - - if (!Coordination::isUserError(multi_code)) - throw Exception( - ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR, - "Unexpected ZooKeeper error while adding block {} with ID '{}': {}", - block_number, - toString(block_id), - multi_code); - auto failed_op_idx = zkutil::getFailedOpIndex(multi_code, responses); String failed_op_path = ops[failed_op_idx]->getPath(); @@ -1032,6 +1022,10 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: LOG_INFO(log, "Block with ID {} already exists (it was just appeared) for part {}. 
Ignore it.", toString(block_id), part->name); + transaction.rollbackPartsToTemporaryState(); + part->is_temp = true; + part->renameTo(temporary_part_relative_path, false); + if constexpr (async_insert) { retry_context.conflict_block_ids = std::vector({failed_op_path}); @@ -1043,6 +1037,16 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: return CommitRetryContext::DUPLICATED_PART; } + transaction.rollback(); // Not in working set (data_parts) + + if (!Coordination::isUserError(multi_code)) + throw Exception( + ErrorCodes::UNEXPECTED_ZOOKEEPER_ERROR, + "Unexpected ZooKeeper error while adding block {} with ID '{}': {}", + block_number, + toString(block_id), + multi_code); + if (multi_code == Coordination::Error::ZNONODE && failed_op_idx == block_unlock_op_idx) throw Exception(ErrorCodes::QUERY_WAS_CANCELLED, "Insert query (for block {}) was canceled by concurrent ALTER PARTITION or TRUNCATE", From f570e8e2c0715001ac0f1633c898699700068edb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Thu, 19 Sep 2024 13:34:51 +0300 Subject: [PATCH 043/171] Remove debug comment --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 98c46edda25..3f5c70adb64 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -1037,7 +1037,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: return CommitRetryContext::DUPLICATED_PART; } - transaction.rollback(); // Not in working set (data_parts) + transaction.rollback(); if (!Coordination::isUserError(multi_code)) throw Exception( From 050b51799ce1e636f7806cb7af6d1bbb1cf481e5 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 19 Sep 2024 14:48:38 +0000 Subject: [PATCH 044/171] add inner and outer read-in-order virtual row test --- ...in_order_optimization_with_virtual_row.sql | 3 ++- ...ization_with_virtual_row_explain.reference | 25 ++++++++++++++++++ ..._optimization_with_virtual_row_explain.sql | 26 +++++++++++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 4c7bc5d17c7..f66b4be2c69 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -196,7 +196,8 @@ CREATE TABLE distinct_in_order ) ENGINE = MergeTree ORDER BY (a, b) -SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +SETTINGS index_granularity = 8192, +index_granularity_bytes = '10Mi'; SYSTEM STOP MERGES distinct_in_order; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference new file mode 100644 index 00000000000..33ef6b19222 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.reference @@ -0,0 +1,25 @@ +(Expression) +ExpressionTransform + 
(Sorting) + MergingSortedTransform 4 → 1 + (Expression) + ExpressionTransform × 4 + (ReadFromMergeTree) + ExpressionTransform × 5 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + ExpressionTransform + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql new file mode 100644 index 00000000000..668b21275b4 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -0,0 +1,26 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `t` DateTime +) +ENGINE = MergeTree +ORDER BY t +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT toDateTime('2024-01-10') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-30') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-20') + number FROM numbers(10000); + +EXPLAIN PIPELINE +SELECT * +FROM tab +ORDER BY t ASC +SETTINGS read_in_order_two_level_merge_threshold = 0, max_threads = 4, read_in_order_use_buffering = 0 +FORMAT tsv; \ No newline at end of file From b4e5c11fd775cf915dff9e816673fb699c99a307 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Fri, 20 Sep 2024 02:11:29 +0000 Subject: [PATCH 045/171] fix --- ...3031_read_in_order_optimization_with_virtual_row_explain.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql index 668b21275b4..8cdcb4628ec 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -1,4 +1,4 @@ --- Tags: no-random-merge-tree-settings +-- Tags: no-random-merge-tree-settings, no-object-storage SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; From 82b4986ee35f974efb48f7ffbb6c698d4e363e43 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 21 Sep 2024 14:53:45 +0000 Subject: [PATCH 046/171] use empty chunk with pk block --- .../Merges/Algorithms/MergeTreeReadInfo.h | 41 ++++++++++++++++--- .../Algorithms/MergingSortedAlgorithm.cpp | 12 +++++- src/Processors/Merges/IMergingTransform.cpp | 4 +- .../QueryPlan/BufferChunksTransform.cpp | 2 +- .../QueryPlan/ReadFromMergeTree.cpp | 21 ++++++++-- .../Transforms/VirtualRowTransform.cpp | 33 ++++----------- .../Transforms/VirtualRowTransform.h | 11 +---- .../MergeTree/MergeTreeSelectProcessor.cpp | 2 +- .../MergeTree/MergeTreeSequentialSource.cpp | 2 +- 9 files changed, 77 insertions(+), 51 deletions(-) diff --git 
a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 862fa1b5e9a..425df2c24b9 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace DB { @@ -10,13 +11,16 @@ class MergeTreeReadInfo : public ChunkInfoCloneable { public: MergeTreeReadInfo() = delete; - explicit MergeTreeReadInfo(size_t part_level, bool virtual_row_) : - origin_merge_tree_part_level(part_level), virtual_row(virtual_row_) { } + explicit MergeTreeReadInfo(size_t part_level) : + origin_merge_tree_part_level(part_level) {} + explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_) : + origin_merge_tree_part_level(part_level), pk_block(pk_block_) {} MergeTreeReadInfo(const MergeTreeReadInfo & other) = default; size_t origin_merge_tree_part_level = 0; - /// If virtual_row is true, the chunk must contain the virtual row only. - bool virtual_row = false; + + /// If is virtual_row, block should not be empty. + Block pk_block; }; inline size_t getPartLevelFromChunk(const Chunk & chunk) @@ -27,12 +31,37 @@ inline size_t getPartLevelFromChunk(const Chunk & chunk) return 0; } -inline bool getVirtualRowFromChunk(const Chunk & chunk) +inline bool isVirtualRow(const Chunk & chunk) { const auto read_info = chunk.getChunkInfos().get(); if (read_info) - return read_info->virtual_row; + return read_info->pk_block.columns() > 0; return false; } +inline void setVirtualRow(Chunk & chunk, const Block & header) +{ + const auto read_info = chunk.getChunkInfos().get(); + chassert(read_info); + + const Block & pk_block = read_info->pk_block; + + Columns ordered_columns; + ordered_columns.reserve(header.columns()); + + for (size_t i = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); + ColumnPtr current_column = type_and_name.type->createColumn(); + + size_t pos = type_and_name.name.find_last_of("."); + String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + + const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); + ordered_columns.push_back(column ? 
column->column : current_column->cloneResized(1)); + } + + chunk.setColumns(ordered_columns, 1); +} + } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 9476d46d939..75c04c8ddb2 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -55,6 +55,14 @@ void MergingSortedAlgorithm::addInput() void MergingSortedAlgorithm::initialize(Inputs inputs) { + for (auto & input : inputs) + { + if (!isVirtualRow(input.chunk)) + continue; + + setVirtualRow(input.chunk, header); + } + removeConstAndSparse(inputs); merged_data.initialize(header, inputs); current_inputs = std::move(inputs); @@ -139,7 +147,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + if (isVirtualRow(current_inputs[current.impl->order].chunk)) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) @@ -238,7 +246,7 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - if (getVirtualRowFromChunk(current_inputs[current.impl->order].chunk)) + if (isVirtualRow(current_inputs[current.impl->order].chunk)) { /// If virtual row is detected, there should be only one row as a single chunk, /// and always skip this chunk to pull the next one. diff --git a/src/Processors/Merges/IMergingTransform.cpp b/src/Processors/Merges/IMergingTransform.cpp index 7488cf4769e..68957cd55f9 100644 --- a/src/Processors/Merges/IMergingTransform.cpp +++ b/src/Processors/Merges/IMergingTransform.cpp @@ -104,14 +104,14 @@ IProcessor::Status IMergingTransformBase::prepareInitializeInputs() /// we won't have to read any chunks anymore; /// If virtual row exists, let it pass through, so don't read more chunks. 
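         /// (A virtual row is a single-row chunk carrying only the part's minimal
         /// primary-key value; the merging algorithm skips it after using it to
         /// order its inputs, so no real data has to be read up front.)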
auto chunk = input.pull(true); - bool virtual_row = getVirtualRowFromChunk(chunk); + bool virtual_row = isVirtualRow(chunk); if (limit_hint == 0 && !virtual_row) input.setNeeded(); if (!virtual_row && ((limit_hint && chunk.getNumRows() < limit_hint) || always_read_till_end)) input.setNeeded(); - if (!chunk.hasRows()) + if (!virtual_row && !chunk.hasRows()) { if (!input.isFinished()) { diff --git a/src/Processors/QueryPlan/BufferChunksTransform.cpp b/src/Processors/QueryPlan/BufferChunksTransform.cpp index 47e2c2ba0d5..75f5f91d981 100644 --- a/src/Processors/QueryPlan/BufferChunksTransform.cpp +++ b/src/Processors/QueryPlan/BufferChunksTransform.cpp @@ -88,7 +88,7 @@ IProcessor::Status BufferChunksTransform::prepare() Chunk BufferChunksTransform::pullChunk(bool & virtual_row) { auto chunk = input.pull(); - virtual_row = getVirtualRowFromChunk(chunk); + virtual_row = isVirtualRow(chunk); if (!virtual_row) num_processed_rows += chunk.getNumRows(); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 2ac663e0680..4b5e33e8b07 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -663,12 +663,25 @@ Pipe ReadFromMergeTree::readInOrder( if (enable_current_virtual_row && (read_type == ReadType::InOrder)) { + const auto & index = part_with_ranges.data_part->getIndex(); + const auto & primary_key = storage_snapshot->metadata->primary_key; + size_t mark_range_begin = part_with_ranges.ranges.front().begin; + + ColumnsWithTypeAndName pk_columns; + pk_columns.reserve(index->size()); + + for (size_t j = 0; j < index->size(); ++j) + { + auto column = primary_key.data_types[j]->createColumn()->cloneEmpty(); + column->insert((*(*index)[j])[mark_range_begin]); + pk_columns.push_back({std::move(column), primary_key.data_types[j], primary_key.column_names[j]}); + } + + Block pk_block(std::move(pk_columns)); + pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, - storage_snapshot->metadata->primary_key, - part_with_ranges.data_part->getIndex(), - part_with_ranges.ranges.front().begin); + return std::make_shared(header, pk_block); }); } diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 9b904fc4ae2..92bf5ce3064 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -9,14 +9,10 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header_, - const KeyDescription & primary_key_, - const IMergeTreeDataPart::Index & index_, - size_t mark_range_begin_) +VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_) : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , header(header_), primary_key(primary_key_) - , index(index_), mark_range_begin(mark_range_begin_) + , header(header_), pk_block(pk_block_) { } @@ -89,29 +85,16 @@ void VirtualRowTransform::work() is_first = false; - /// Reorder the columns according to result_header - Columns ordered_columns; - ordered_columns.reserve(header.columns()); - for (size_t i = 0, j = 0; i < header.columns(); ++i) + Columns empty_columns; + empty_columns.reserve(header.columns()); + for (size_t i = 0; i < header.columns(); ++i) { const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); - ColumnPtr current_column = type_and_name.type->createColumn(); - // 
ordered_columns.push_back(current_column->cloneResized(1)); - - if (j < index->size() && type_and_name.name == primary_key.column_names[j] - && type_and_name.type == primary_key.data_types[j]) - { - auto column = current_column->cloneEmpty(); - column->insert((*(*index)[j])[mark_range_begin]); - ordered_columns.push_back(std::move(column)); - ++j; - } - else - ordered_columns.push_back(current_column->cloneResized(1)); + empty_columns.push_back(type_and_name.type->createColumn()->cloneEmpty()); } - current_chunk.setColumns(ordered_columns, 1); - current_chunk.getChunkInfos().add(std::make_shared(0, true)); + current_chunk.setColumns(empty_columns, 0); + current_chunk.getChunkInfos().add(std::make_shared(0, pk_block)); } else { diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index b9f0cb46242..e3215393ad1 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -11,10 +11,7 @@ namespace DB class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header_, - const KeyDescription & primary_key_, - const IMergeTreeDataPart::Index & index_, - size_t mark_range_begin_); + explicit VirtualRowTransform(const Block & header_, const Block & pk_block_); String getName() const override { return "VirtualRowTransform"; } @@ -32,11 +29,7 @@ private: bool is_first = true; Block header; - KeyDescription primary_key; - /// PK index used in virtual row. - IMergeTreeDataPart::Index index; - /// The first range that might contain the candidate. - size_t mark_range_begin; + Block pk_block; }; } diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 85f545d2a51..cafe8dc3fbf 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -147,7 +147,7 @@ ChunkAndProgress MergeTreeSelectProcessor::read() auto chunk = Chunk(ordered_columns, res.row_count); if (add_part_level) - chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level, false)); + chunk.getChunkInfos().add(std::make_shared(task->getInfo().data_part->info.level)); return ChunkAndProgress{ .chunk = std::move(chunk), diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index c62326f82dd..835045735fe 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -267,7 +267,7 @@ try auto result = Chunk(std::move(res_columns), rows_read); if (add_part_level) - result.getChunkInfos().add(std::make_shared(data_part->info.level, false)); + result.getChunkInfos().add(std::make_shared(data_part->info.level)); return result; } } From 10ed5a8521da3eb91e9d7caaf4bd0bdb32bca25c Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Wed, 25 Sep 2024 17:48:16 +0000 Subject: [PATCH 047/171] fix --- src/Processors/Merges/Algorithms/MergeTreeReadInfo.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 425df2c24b9..98cb414875b 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -54,7 +54,7 @@ inline void setVirtualRow(Chunk & chunk, const Block & header) const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); 
ColumnPtr current_column = type_and_name.type->createColumn(); - size_t pos = type_and_name.name.find_last_of("."); + size_t pos = type_and_name.name.find_last_of('.'); String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); From bf591fa12b27f16411bf2441b06d1173616d34ba Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 12:20:51 +0000 Subject: [PATCH 048/171] Introduce virtual row conversions. --- .../Merges/Algorithms/MergeTreeReadInfo.h | 40 +++-- .../Algorithms/MergingSortedAlgorithm.cpp | 7 +- .../Algorithms/MergingSortedAlgorithm.h | 5 +- .../Merges/MergingSortedTransform.cpp | 4 +- .../Merges/MergingSortedTransform.h | 1 + .../Optimizations/actionsDAGUtils.cpp | 2 + .../QueryPlan/Optimizations/actionsDAGUtils.h | 4 + .../Optimizations/distinctReadInOrder.cpp | 2 +- .../Optimizations/optimizeReadInOrder.cpp | 168 +++++++++++++----- .../QueryPlan/ReadFromMergeTree.cpp | 73 ++++---- src/Processors/QueryPlan/ReadFromMergeTree.h | 16 +- .../Transforms/MergeSortingTransform.cpp | 2 + .../Transforms/VirtualRowTransform.cpp | 8 +- .../Transforms/VirtualRowTransform.h | 4 +- src/Storages/ReadInOrderOptimizer.cpp | 2 +- src/Storages/SelectQueryInfo.h | 11 +- src/Storages/StorageMerge.cpp | 2 +- 17 files changed, 226 insertions(+), 125 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 98cb414875b..62bbe3eac6e 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -2,6 +2,7 @@ #include #include +#include namespace DB { @@ -13,14 +14,15 @@ public: MergeTreeReadInfo() = delete; explicit MergeTreeReadInfo(size_t part_level) : origin_merge_tree_part_level(part_level) {} - explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_) : - origin_merge_tree_part_level(part_level), pk_block(pk_block_) {} + explicit MergeTreeReadInfo(size_t part_level, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_) : + origin_merge_tree_part_level(part_level), pk_block(pk_block_), virtual_row_conversions(std::move(virtual_row_conversions_)) {} MergeTreeReadInfo(const MergeTreeReadInfo & other) = default; size_t origin_merge_tree_part_level = 0; /// If is virtual_row, block should not be empty. 
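     /// virtual_row_conversions are applied to pk_block lazily in setVirtualRow,
     /// so the raw index values are converted only if the virtual row is used.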
     Block pk_block;
+    ExpressionActionsPtr virtual_row_conversions;
 };
 
 inline size_t getPartLevelFromChunk(const Chunk & chunk)
@@ -39,29 +41,33 @@ inline bool isVirtualRow(const Chunk & chunk)
     return false;
 }
 
-inline void setVirtualRow(Chunk & chunk, const Block & header)
+inline void setVirtualRow(Chunk & chunk, bool apply_virtual_row_conversions)
 {
-    const auto read_info = chunk.getChunkInfos().get<MergeTreeReadInfo>();
+    auto read_info = chunk.getChunkInfos().extract<MergeTreeReadInfo>();
     chassert(read_info);
 
-    const Block & pk_block = read_info->pk_block;
+    Block & pk_block = read_info->pk_block;
+    if (apply_virtual_row_conversions)
+        read_info->virtual_row_conversions->execute(pk_block);
 
-    Columns ordered_columns;
-    ordered_columns.reserve(header.columns());
+    chunk.setColumns(pk_block.getColumns(), 1);
 
-    for (size_t i = 0; i < header.columns(); ++i)
-    {
-        const ColumnWithTypeAndName & type_and_name = header.getByPosition(i);
-        ColumnPtr current_column = type_and_name.type->createColumn();
+    // Columns ordered_columns;
+    // ordered_columns.reserve(pk_block.columns());
 
-        size_t pos = type_and_name.name.find_last_of('.');
-        String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1);
+    // for (size_t i = 0; i < header.columns(); ++i)
+    // {
+    //     const ColumnWithTypeAndName & type_and_name = header.getByPosition(i);
+    //     ColumnPtr current_column = type_and_name.type->createColumn();
 
-        const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true);
-        ordered_columns.push_back(column ? column->column : current_column->cloneResized(1));
+    //     size_t pos = type_and_name.name.find_last_of('.');
+    //     String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1);
 
+    //     const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true);
+    //     ordered_columns.push_back(column ?
column->column : current_column->cloneResized(1)); + // } + + // chunk.setColumns(ordered_columns, 1); } } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 75c04c8ddb2..0dd95729ba3 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -22,12 +22,14 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, WriteBuffer * out_row_sources_buf_, - bool use_average_block_sizes) + bool use_average_block_sizes, + bool apply_virtual_row_conversions_) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) , description(description_) , limit(limit_) , out_row_sources_buf(out_row_sources_buf_) + , apply_virtual_row_conversions(apply_virtual_row_conversions_) , current_inputs(num_inputs) , sorting_queue_strategy(sorting_queue_strategy_) , cursors(num_inputs) @@ -60,7 +62,8 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!isVirtualRow(input.chunk)) continue; - setVirtualRow(input.chunk, header); + setVirtualRow(input.chunk, apply_virtual_row_conversions); + input.skip_last_row = true; } removeConstAndSparse(inputs); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index c889668a38e..0a99b1bd8a6 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -22,7 +22,8 @@ public: SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, WriteBuffer * out_row_sources_buf_ = nullptr, - bool use_average_block_sizes = false); + bool use_average_block_sizes = false, + bool apply_virtual_row_conversions_ = true); void addInput(); @@ -47,6 +48,8 @@ private: /// If it is not nullptr then it should be populated during execution WriteBuffer * out_row_sources_buf = nullptr; + bool apply_virtual_row_conversions; + /// Chunks currently being merged. 
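     /// (A virtual-row chunk among the inputs only pre-orders the cursors; it
     /// is consumed during initialize() and never reaches the merged output.)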
Inputs current_inputs; diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d2895a2a2e9..760108facb6 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -22,6 +22,7 @@ MergingSortedTransform::MergingSortedTransform( bool always_read_till_end_, WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, + bool apply_virtual_row_conversions, bool have_all_inputs_) : IMergingTransform( num_inputs, @@ -38,7 +39,8 @@ MergingSortedTransform::MergingSortedTransform( sorting_queue_strategy, limit_, out_row_sources_buf_, - use_average_block_sizes) + use_average_block_sizes, + apply_virtual_row_conversions) { } diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 6e52450efa7..220ecf0902a 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -22,6 +22,7 @@ public: bool always_read_till_end_ = false, WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, + bool apply_virtual_row_conversions = true, bool have_all_inputs_ = true); String getName() const override { return "MergingSortedTransform"; } diff --git a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp index 2f1618ea6e1..b8216d6c4c4 100644 --- a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp +++ b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.cpp @@ -210,6 +210,8 @@ MatchedTrees::Matches matchTrees(const ActionsDAG::NodeRawConstPtrs & inner_dag, MatchedTrees::Monotonicity monotonicity; monotonicity.direction *= info.is_positive ? 1 : -1; monotonicity.strict = info.is_strict; + monotonicity.child_match = &child_match; + monotonicity.child_node = monotonic_child; if (child_match.monotonicity) { diff --git a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h index e78d658978e..82f0962f799 100644 --- a/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h +++ b/src/Processors/QueryPlan/Optimizations/actionsDAGUtils.h @@ -22,12 +22,16 @@ namespace DB /// DAG for PK does not contain aliases and ambiguous nodes. struct MatchedTrees { + struct Match; + /// Monotonicity is calculated for monotonic functions chain. /// Chain is not strict if there is any non-strict monotonic function. 
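 /// child_match / child_node (added below) remember the inner link of the
 /// chain, so a matched chain such as f(g(pk)) can later be replayed node by
 /// node over a single primary-key value (see addMonotonicChain()).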
    struct Monotonicity
     {
         int direction = 1;
         bool strict = true;
+        const Match * child_match = nullptr;
+        const ActionsDAG::Node * child_node = nullptr;
     };
 
     struct Match
diff --git a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp
index 37e61a6c388..5af680b42b8 100644
--- a/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp
+++ b/src/Processors/QueryPlan/Optimizations/distinctReadInOrder.cpp
@@ -129,7 +129,7 @@ size_t tryDistinctReadInOrder(QueryPlan::Node * parent_node)
     /// update input order info in read_from_merge_tree step
     const int direction = 0; /// for DISTINCT direction doesn't matter, ReadFromMergeTree will choose proper one
-    bool can_read = read_from_merge_tree->requestReadingInOrder(number_of_sorted_distinct_columns, direction, pre_distinct->getLimitHint());
+    bool can_read = read_from_merge_tree->requestReadingInOrder(number_of_sorted_distinct_columns, direction, pre_distinct->getLimitHint(), {});
     if (!can_read)
         return 0;
 
diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp
index d3ecb3cac6b..8cd0a634a1e 100644
--- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp
+++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp
@@ -94,17 +94,6 @@ static QueryPlan::Node * findReadingStep(QueryPlan::Node & node, StepStack & bac
     return nullptr;
 }
 
-static bool checkVirtualRowSupport(const StepStack & backward_path)
-{
-    for (size_t i = 0; i < backward_path.size() - 1; i++)
-    {
-        IQueryPlanStep * step = backward_path[i];
-        if (!typeid_cast<ExpressionStep *>(step) && !typeid_cast<FilterStep *>(step))
-            return false;
-    }
-    return true;
-}
-
 void updateStepsDataStreams(StepStack & steps_to_update)
 {
     /// update data stream's sorting properties for found transforms
@@ -338,11 +327,42 @@ void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns)
     }
 }
 
-InputOrderInfoPtr buildInputOrderInfo(
+static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match)
+{
+    if (!match->monotonicity)
+        return &dag.addInput(node->result_name, node->result_type);
+
+    if (node->type == ActionsDAG::ActionType::ALIAS)
+        return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match), node->result_name);
+
+    ActionsDAG::NodeRawConstPtrs args;
+    args.reserve(node->children.size());
+    for (const auto * child : node->children)
+    {
+        if (child == match->monotonicity->child_node)
+            args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match));
+        else
+            args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name}));
+    }
+
+    return &dag.addFunction(node->function_base, std::move(args), {});
+}
+
+struct SortingInputOrder
+{
+    InputOrderInfoPtr input_order{};
+    /// This is needed for virtual row optimization.
+    /// Convert the PK values to ORDER BY key.
+    /// If empty, the optimization cannot be applied.
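+    /// Example (hypothetical): with PK (t) and ORDER BY toStartOfDay(t), the
+    /// DAG takes the part's minimal t and applies toStartOfDay to it.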
+ std::optional virtual_row_conversion{}; +}; + +SortingInputOrder buildInputOrderInfo( const FixedColumns & fixed_columns, const std::optional & dag, const SortDescription & description, const KeyDescription & sorting_key, + const Names & pk_column_names, size_t limit) { //std::cerr << "------- buildInputOrderInfo " << std::endl; @@ -381,7 +401,18 @@ InputOrderInfoPtr buildInputOrderInfo( int read_direction = 0; size_t next_description_column = 0; size_t next_sort_key = 0; - bool first_prefix_fixed = false; + + bool can_optimize_virtual_row = true; + + struct MatchInfo + { + const ActionsDAG::Node * source = nullptr; + const ActionsDAG::Node * fixed_column = nullptr; + const MatchedTrees::Match * monotonic = nullptr; + }; + + std::vector match_infos; + match_infos.reserve(description.size()); while (next_description_column < description.size() && next_sort_key < sorting_key.column_names.size()) { @@ -424,6 +455,7 @@ InputOrderInfoPtr buildInputOrderInfo( //std::cerr << "====== (no dag) Found direct match" << std::endl; + match_infos.push_back({.source = sort_column_node}); ++next_description_column; ++next_sort_key; } @@ -452,27 +484,46 @@ InputOrderInfoPtr buildInputOrderInfo( { current_direction *= match.monotonicity->direction; strict_monotonic = match.monotonicity->strict; + match_infos.push_back({.source = sort_node, .monotonic = &match}); } + else + match_infos.push_back({.source = sort_node}); ++next_description_column; ++next_sort_key; } else if (fixed_key_columns.contains(sort_column_node)) { + if (next_sort_key == 0) - first_prefix_fixed = true; + { + // Disable virtual row optimization. + // For example, when pk is (a,b), a = 1, order by b, virtual row should be + // disabled in the following case: + // 1st part (0, 100), (1, 2), (1, 3), (1, 4) + // 2nd part (0, 100), (1, 2), (1, 3), (1, 4). + + can_optimize_virtual_row = true; + } //std::cerr << "+++++++++ Found fixed key by match" << std::endl; ++next_sort_key; } else { - //std::cerr << "====== Check for fixed const : " << bool(sort_node->column) << " fixed : " << fixed_columns.contains(sort_node) << std::endl; bool is_fixed_column = sort_node->column || fixed_columns.contains(sort_node); if (!is_fixed_column) break; + if (!sort_node->column) + /// Virtual row for fixed column from order by is not supported now. + /// TODO: we can do it for the simple case, + /// But it's better to remove fixed columns from ORDER BY completely, e.g: + /// WHERE x = 42 ORDER BY x, y => WHERE x = 42 ORDER BY y + can_optimize_virtual_row = false; + + match_infos.push_back({.source = sort_node, .fixed_column = sort_node}); order_key_prefix_descr.push_back(sort_column_description); ++next_description_column; } @@ -494,9 +545,36 @@ InputOrderInfoPtr buildInputOrderInfo( } if (read_direction == 0 || order_key_prefix_descr.empty()) - return nullptr; + return {}; - return std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit, first_prefix_fixed); + /// If the prefix description is used, we can't restore the full description from PK value. + /// TODO: partial sort description can be used as well. Implement support later. 
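+    /// The check below is deliberately conservative: any partial match
+    /// disables the virtual row instead of risking a wrongly ordered head.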
+ if (order_key_prefix_descr.size() < description.size() || pk_column_names.size() < next_sort_key) + can_optimize_virtual_row = false; + + auto order_info = std::make_shared(order_key_prefix_descr, next_sort_key, read_direction, limit); + + std::optional virtual_row_conversion; + if (can_optimize_virtual_row) + { + ActionsDAG virtual_row_dag; + virtual_row_dag.getOutputs().reserve(match_infos.size()); + for (const auto & info : match_infos) + { + const ActionsDAG::Node * output; + if (info.fixed_column) + output = &virtual_row_dag.addColumn({info.fixed_column->column, info.fixed_column->result_type, info.fixed_column->result_name}); + else if (info.monotonic) + output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic); + else + output = &virtual_row_dag.addInput(info.source->result_name, info.source->result_type); + + virtual_row_dag.getOutputs().push_back(output); + } + virtual_row_conversion = std::move(virtual_row_dag); + } + + return {std::move(order_info), std::move(virtual_row_conversion)}; } /// We really need three different sort descriptions here. @@ -700,11 +778,11 @@ AggregationInputOrder buildInputOrderInfo( for (const auto & key : not_matched_group_by_keys) group_by_sort_description.emplace_back(SortColumnDescription(std::string(key))); - auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0, false); + auto input_order = std::make_shared(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0); return { std::move(input_order), std::move(sort_description_for_merging), std::move(group_by_sort_description) }; } -InputOrderInfoPtr buildInputOrderInfo( +SortingInputOrder buildInputOrderInfo( const ReadFromMergeTree * reading, const FixedColumns & fixed_columns, const std::optional & dag, @@ -712,15 +790,17 @@ InputOrderInfoPtr buildInputOrderInfo( size_t limit) { const auto & sorting_key = reading->getStorageMetadata()->getSortingKey(); + const auto & pk_column_names = reading->getStorageMetadata()->getPrimaryKey().column_names; return buildInputOrderInfo( fixed_columns, dag, description, sorting_key, + pk_column_names, limit); } -InputOrderInfoPtr buildInputOrderInfo( +SortingInputOrder buildInputOrderInfo( ReadFromMerge * merge, const FixedColumns & fixed_columns, const std::optional & dag, @@ -729,28 +809,31 @@ InputOrderInfoPtr buildInputOrderInfo( { const auto & tables = merge->getSelectedTables(); - InputOrderInfoPtr order_info; + SortingInputOrder order_info; for (const auto & table : tables) { auto storage = std::get(table); - const auto & sorting_key = storage->getInMemoryMetadataPtr()->getSortingKey(); + auto metadata = storage->getInMemoryMetadataPtr(); + const auto & sorting_key = metadata->getSortingKey(); + // const auto & pk_column_names = metadata->getPrimaryKey().column_names; if (sorting_key.column_names.empty()) - return nullptr; + return {}; auto table_order_info = buildInputOrderInfo( fixed_columns, dag, description, sorting_key, + {}, limit); - if (!table_order_info) - return nullptr; + if (!table_order_info.input_order) + return {}; - if (!order_info) - order_info = table_order_info; - else if (*order_info != *table_order_info) - return nullptr; + if (!order_info.input_order) + order_info = std::move(table_order_info); + else if (*order_info.input_order != *table_order_info.input_order) + return {}; } return order_info; @@ -830,19 +913,19 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n dag, description, limit); - if (order_info) + if 
(order_info.input_order) { - bool can_read = reading->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); + bool can_read = reading->requestReadingInOrder( + order_info.input_order->used_prefix_of_sorting_key_size, + order_info.input_order->direction, + order_info.input_order->limit, + std::move(order_info.virtual_row_conversion)); + if (!can_read) return nullptr; - - if (!checkVirtualRowSupport(backward_path)) - reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::No); - else if (!order_info->first_prefix_fixed) - reading->setVirtualRowStatus(ReadFromMergeTree::VirtualRowStatus::Possible); } - return order_info; + return order_info.input_order; } else if (auto * merge = typeid_cast(reading_node->step.get())) { @@ -852,14 +935,14 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n dag, description, limit); - if (order_info) + if (order_info.input_order) { - bool can_read = merge->requestReadingInOrder(order_info); + bool can_read = merge->requestReadingInOrder(order_info.input_order); if (!can_read) return nullptr; } - return order_info; + return order_info.input_order; } return nullptr; @@ -893,7 +976,8 @@ AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPl bool can_read = reading->requestReadingInOrder( order_info.input_order->used_prefix_of_sorting_key_size, order_info.input_order->direction, - order_info.input_order->limit); + order_info.input_order->limit, + {}); if (!can_read) return {}; } @@ -1139,7 +1223,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, if (order_info) { - bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit); + bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit, {}); if (!can_read) return 0; sorting->convertToFinishSorting(order_info->sort_description_for_merging, false); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 4b5e33e8b07..f4783862a50 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -549,8 +549,7 @@ Pipe ReadFromMergeTree::readInOrder( Names required_columns, PoolSettings pool_settings, ReadType read_type, - UInt64 read_limit, - bool enable_current_virtual_row) + UInt64 read_limit) { /// For reading in order it makes sense to read only /// one range per task to reduce number of read rows. 
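For orientation at this point in the series: the hunks above drop the per-pipe
virtual-row flag from readInOrder(), and the decision is now carried entirely
by virtual_row_conversion. As a self-contained illustration of why a virtual
row helps at all, here is a hypothetical sketch; the types and names are
invented and are not ClickHouse's IMergingAlgorithm interfaces. Each sorted
input enters the merge heap represented only by its minimal key, and real rows
are pulled only from inputs that actually reach the top of the heap before the
limit is satisfied.

#include <cstdint>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

struct Input
{
    std::vector<uint64_t> rows;   // sorted ascending; rows.front() doubles as the virtual row
    size_t pos = 0;
    bool primed = false;          // becomes true once the virtual row is popped
};

std::vector<uint64_t> mergeWithLimit(std::vector<Input> inputs, size_t limit)
{
    using Entry = std::pair<uint64_t, size_t>;  // (key, input index)
    std::priority_queue<Entry, std::vector<Entry>, std::greater<>> heap;  // min-heap

    for (size_t i = 0; i < inputs.size(); ++i)
        if (!inputs[i].rows.empty())
            heap.emplace(inputs[i].rows.front(), i);  // the virtual row: no real read yet

    std::vector<uint64_t> result;
    while (!heap.empty() && result.size() < limit)
    {
        auto [key, i] = heap.top();
        heap.pop();
        Input & in = inputs[i];
        if (!in.primed)
        {
            in.primed = true;                  // first real access to this input
            heap.emplace(in.rows[in.pos], i);  // replace the virtual row with the real first row
            continue;                          // the virtual row itself is never emitted
        }
        result.push_back(key);                 // key == in.rows[in.pos] here
        if (++in.pos < in.rows.size())
            heap.emplace(in.rows[in.pos], i);
    }
    return result;  // inputs whose minimum never reached the top stay unread
}

With LIMIT 3 and inputs {1, 2, 3} and {100, 101}, the second input contributes
only its virtual row 100 and is never primed, which mirrors how a part whose
first PK value is too large is never actually decompressed.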
@@ -661,7 +660,7 @@ Pipe ReadFromMergeTree::readInOrder( Pipe pipe(source); - if (enable_current_virtual_row && (read_type == ReadType::InOrder)) + if (virtual_row_conversion && (read_type == ReadType::InOrder)) { const auto & index = part_with_ranges.data_part->getIndex(); const auto & primary_key = storage_snapshot->metadata->primary_key; @@ -681,7 +680,7 @@ Pipe ReadFromMergeTree::readInOrder( pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, pk_block); + return std::make_shared(header, pk_block, virtual_row_conversion); }); } @@ -729,7 +728,7 @@ Pipe ReadFromMergeTree::read( if (read_type == ReadType::Default && (max_streams > 1 || checkAllPartsOnRemoteFS(parts_with_range))) return readFromPool(std::move(parts_with_range), std::move(required_columns), std::move(pool_settings)); - auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0, false); + auto pipe = readInOrder(parts_with_range, required_columns, pool_settings, read_type, /*limit=*/ 0); /// Use ConcatProcessor to concat sources together. /// It is needed to read in parts order (and so in PK order) if single thread is used. @@ -1038,7 +1037,7 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( /// For parallel replicas the split will be performed on the initiator side. if (is_parallel_reading_from_replicas) { - pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit, false)); + pipes.emplace_back(readInOrder(std::move(parts_with_ranges), column_names, pool_settings, read_type, input_order_info->limit)); } else { @@ -1111,33 +1110,32 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } - bool primary_key_type_supports_virtual_row = true; - const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); - for (const auto & action : actions) - { - if (action.node->type != ActionsDAG::ActionType::INPUT) - { - primary_key_type_supports_virtual_row = false; - break; - } - } + // bool primary_key_type_supports_virtual_row = true; + // const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); + // for (const auto & action : actions) + // { + // if (action.node->type != ActionsDAG::ActionType::INPUT) + // { + // primary_key_type_supports_virtual_row = false; + // break; + // } + // } - /// If possible in the optimization stage, check whether there are more than one branch. - if (virtual_row_status == VirtualRowStatus::Possible) - virtual_row_status = splitted_parts_and_ranges.size() > 1 - || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) - ? VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; + // /// If possible in the optimization stage, check whether there are more than one branch. + // if (virtual_row_status == VirtualRowStatus::Possible) + // virtual_row_status = splitted_parts_and_ranges.size() > 1 + // || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) + // ? 
VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan;
 
     for (auto && item : splitted_parts_and_ranges)
     {
-        bool enable_current_virtual_row = false;
-        if (virtual_row_status == VirtualRowStatus::Yes)
-            enable_current_virtual_row = true;
-        else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan)
-            enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1;
+        // bool enable_current_virtual_row = false;
+        // if (virtual_row_status == VirtualRowStatus::Yes)
+        //     enable_current_virtual_row = true;
+        // else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan)
+        //     enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1;
 
-        pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit,
-            enable_current_virtual_row && primary_key_type_supports_virtual_row));
+        pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit));
     }
 }
@@ -1172,7 +1170,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder(
         if (pipe.numOutputPorts() > 1)
         {
             auto transform = std::make_shared<MergingSortedTransform>(
-                pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch);
+                pipe.getHeader(), pipe.numOutputPorts(), sort_description, block_size.max_block_size_rows, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch,
+                0, false, nullptr, false, /*apply_virtual_row_conversions*/ false);
 
             pipe.addTransform(std::move(transform));
         }
@@ -1811,7 +1810,7 @@ ReadFromMergeTree::AnalysisResultPtr ReadFromMergeTree::selectRangesToRead(
     return std::make_shared<AnalysisResult>(std::move(result));
 }
 
-bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit)
+bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, size_t read_limit, std::optional<ActionsDAG> virtual_row_conversion_)
 {
     /// if direction is not set, use current one
     if (!direction)
@@ -1822,7 +1821,7 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction,
     if (direction != 1 && query_info.isFinal())
         return false;
 
-    query_info.input_order_info = std::make_shared<InputOrderInfo>(SortDescription{}, prefix_size, direction, read_limit, false);
+    query_info.input_order_info = std::make_shared<InputOrderInfo>(SortDescription{}, prefix_size, direction, read_limit);
     reader_settings.read_in_order = true;
 
     /// In case of read-in-order, don't create too many reading streams.
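To make the new std::optional<ActionsDAG> parameter concrete: when set, it is
an expression over the leading primary-key columns, and ordering parts by the
converted virtual row agrees with ordering them by their first real rows as
long as the matched chain is monotonic. A hypothetical sketch of that ordering
property (toStartOfHourLike and the layout are invented for illustration):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Stand-in for a monotonic ORDER BY function over the PK column.
static uint64_t toStartOfHourLike(uint64_t t) { return t - t % 3600; }

// Order part indices by their converted virtual rows; min_t_per_part[i] holds
// the first primary-key value of part i.
std::vector<size_t> orderPartsByVirtualRow(const std::vector<uint64_t> & min_t_per_part)
{
    std::vector<size_t> order(min_t_per_part.size());
    std::iota(order.begin(), order.end(), size_t{0});
    std::sort(order.begin(), order.end(), [&](size_t a, size_t b)
    {
        return toStartOfHourLike(min_t_per_part[a]) < toStartOfHourLike(min_t_per_part[b]);
    });
    return order;
}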
@@ -1855,6 +1854,9 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// Let prefer in-order optimization over vertical FINAL for now enable_vertical_final = false; + if (virtual_row_conversion_) + virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); + return true; } @@ -2305,6 +2307,12 @@ void ReadFromMergeTree::describeActions(FormatSettings & format_settings) const expression->describeActions(format_settings.out, prefix); } } + + if (virtual_row_conversion) + { + format_settings.out << prefix << "Virtual row conversions" << '\n'; + virtual_row_conversion->describeActions(format_settings.out, prefix); + } } void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const @@ -2344,6 +2352,9 @@ void ReadFromMergeTree::describeActions(JSONBuilder::JSONMap & map) const map.add("Prewhere info", std::move(prewhere_info_map)); } + + if (virtual_row_conversion) + map.add("Virtual row conversions", virtual_row_conversion->toTree()); } void ReadFromMergeTree::describeIndexes(FormatSettings & format_settings) const diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index 767fcf3b0f8..e20c06aeb53 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -108,14 +108,6 @@ public: using AnalysisResultPtr = std::shared_ptr; - enum class VirtualRowStatus - { - NoConsiderInLogicalPlan, - Possible, - No, - Yes, - }; - ReadFromMergeTree( MergeTreeData::DataPartsVector parts_, MergeTreeData::MutationsSnapshotPtr mutations_snapshot_, @@ -195,7 +187,7 @@ public: StorageMetadataPtr getStorageMetadata() const { return storage_snapshot->metadata; } /// Returns `false` if requested reading cannot be performed. 
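     /// The optional DAG, when present, is compiled into ExpressionActions and
     /// later applied to each part's virtual row before merging.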
- bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit); + bool requestReadingInOrder(size_t prefix_size, int direction, size_t limit, std::optional virtual_row_conversion_); bool readsInOrder() const; void updatePrewhereInfo(const PrewhereInfoPtr & prewhere_info_value) override; @@ -218,8 +210,6 @@ public: void applyFilters(ActionDAGNodes added_filter_nodes) override; - void setVirtualRowStatus(VirtualRowStatus virtual_row_status_) { virtual_row_status = virtual_row_status_; } - private: int getSortDirection() const { @@ -262,7 +252,7 @@ private: Pipe read(RangesInDataParts parts_with_range, Names required_columns, ReadType read_type, size_t max_streams, size_t min_marks_for_concurrent_read, bool use_uncompressed_cache); Pipe readFromPool(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); Pipe readFromPoolParallelReplicas(RangesInDataParts parts_with_range, Names required_columns, PoolSettings pool_settings); - Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit, bool enable_current_virtual_row); + Pipe readInOrder(RangesInDataParts parts_with_ranges, Names required_columns, PoolSettings pool_settings, ReadType read_type, UInt64 limit); Pipe spreadMarkRanges(RangesInDataParts && parts_with_ranges, size_t num_streams, AnalysisResult & result, std::optional & result_projection); @@ -293,7 +283,7 @@ private: bool enable_vertical_final = false; bool enable_remove_parts_from_snapshot_optimization = true; - VirtualRowStatus virtual_row_status = VirtualRowStatus::NoConsiderInLogicalPlan; + ExpressionActionsPtr virtual_row_conversion; std::optional number_of_current_replica; }; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index c45192e7118..6121a847ca8 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -187,6 +187,7 @@ void MergeSortingTransform::consume(Chunk chunk) { bool have_all_inputs = false; bool use_average_block_sizes = false; + bool apply_virtual_row = false; external_merging_sorted = std::make_shared( header_without_constants, @@ -199,6 +200,7 @@ void MergeSortingTransform::consume(Chunk chunk) /*always_read_till_end_=*/ false, nullptr, use_average_block_sizes, + apply_virtual_row, have_all_inputs); processors.emplace_back(external_merging_sorted); diff --git a/src/Processors/Transforms/VirtualRowTransform.cpp b/src/Processors/Transforms/VirtualRowTransform.cpp index 92bf5ce3064..5f2bf0b0788 100644 --- a/src/Processors/Transforms/VirtualRowTransform.cpp +++ b/src/Processors/Transforms/VirtualRowTransform.cpp @@ -9,10 +9,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_) +VirtualRowTransform::VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_) : IProcessor({header_}, {header_}) , input(inputs.front()), output(outputs.front()) - , header(header_), pk_block(pk_block_) + , pk_block(pk_block_) + , virtual_row_conversions(std::move(virtual_row_conversions_)) { } @@ -86,6 +87,7 @@ void VirtualRowTransform::work() is_first = false; Columns empty_columns; + const auto & header = getOutputs().front().getHeader(); empty_columns.reserve(header.columns()); for (size_t i = 0; i < header.columns(); ++i) { @@ -94,7 +96,7 @@ void 
VirtualRowTransform::work() } current_chunk.setColumns(empty_columns, 0); - current_chunk.getChunkInfos().add(std::make_shared(0, pk_block)); + current_chunk.getChunkInfos().add(std::make_shared(0, pk_block, virtual_row_conversions)); } else { diff --git a/src/Processors/Transforms/VirtualRowTransform.h b/src/Processors/Transforms/VirtualRowTransform.h index e3215393ad1..efc54419a6e 100644 --- a/src/Processors/Transforms/VirtualRowTransform.h +++ b/src/Processors/Transforms/VirtualRowTransform.h @@ -11,7 +11,7 @@ namespace DB class VirtualRowTransform : public IProcessor { public: - explicit VirtualRowTransform(const Block & header_, const Block & pk_block_); + explicit VirtualRowTransform(const Block & header_, const Block & pk_block_, ExpressionActionsPtr virtual_row_conversions_); String getName() const override { return "VirtualRowTransform"; } @@ -28,8 +28,8 @@ private: bool can_generate = true; bool is_first = true; - Block header; Block pk_block; + ExpressionActionsPtr virtual_row_conversions; }; } diff --git a/src/Storages/ReadInOrderOptimizer.cpp b/src/Storages/ReadInOrderOptimizer.cpp index ea7ea218feb..9c8c4c2fe79 100644 --- a/src/Storages/ReadInOrderOptimizer.cpp +++ b/src/Storages/ReadInOrderOptimizer.cpp @@ -249,7 +249,7 @@ InputOrderInfoPtr ReadInOrderOptimizer::getInputOrderImpl( if (sort_description_for_merging.empty()) return {}; - return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit, false); + return std::make_shared(std::move(sort_description_for_merging), key_pos, read_direction, limit); } InputOrderInfoPtr ReadInOrderOptimizer::getInputOrder( diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index bf1229f7a3a..7ad6a733c6f 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -119,22 +119,13 @@ struct InputOrderInfo const int direction; const UInt64 limit; - /** For virtual row optimization only - * for example, when pk is (a,b), a = 1, order by b, virtual row should be - * disabled in the following case: - * 1st part (0, 100), (1, 2), (1, 3), (1, 4) - * 2nd part (0, 100), (1, 2), (1, 3), (1, 4). 
- */ - bool first_prefix_fixed; - InputOrderInfo( const SortDescription & sort_description_for_merging_, size_t used_prefix_of_sorting_key_size_, - int direction_, UInt64 limit_, bool first_prefix_fixed_) + int direction_, UInt64 limit_) : sort_description_for_merging(sort_description_for_merging_) , used_prefix_of_sorting_key_size(used_prefix_of_sorting_key_size_) , direction(direction_), limit(limit_) - , first_prefix_fixed(first_prefix_fixed_) { } diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index f40aa8ae4e8..40713a89f30 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1555,7 +1555,7 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_) auto request_read_in_order = [order_info_](ReadFromMergeTree & read_from_merge_tree) { return read_from_merge_tree.requestReadingInOrder( - order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit); + order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit, {}); }; bool ok = true; From 7feda9a05413fedf681d3fe1e229bf9e5ab434ef Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 15:27:57 +0000 Subject: [PATCH 049/171] Fix 03031_read_in_order_optimization_with_virtual_row --- .../Merges/Algorithms/MergeTreeReadInfo.h | 51 +++++++++++++------ .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Optimizations/optimizeReadInOrder.cpp | 24 ++++++--- .../QueryPlan/ReadFromMergeTree.cpp | 5 +- 4 files changed, 57 insertions(+), 26 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index 62bbe3eac6e..a4baaca215b 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -7,6 +7,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + /// To carry part level and virtual row if chunk is produced by a merge tree source class MergeTreeReadInfo : public ChunkInfoCloneable { @@ -41,33 +46,49 @@ inline bool isVirtualRow(const Chunk & chunk) return false; } -inline void setVirtualRow(Chunk & chunk, bool apply_virtual_row_conversions) +inline void setVirtualRow(Chunk & chunk, const Block & header, bool apply_virtual_row_conversions) { - auto read_info = chunk.getChunkInfos().extract(); + auto read_info = chunk.getChunkInfos().get(); chassert(read_info); Block & pk_block = read_info->pk_block; + + // std::cerr << apply_virtual_row_conversions << std::endl; + // std::cerr << read_info->virtual_row_conversions->dumpActions() << std::endl; + if (apply_virtual_row_conversions) read_info->virtual_row_conversions->execute(pk_block); - chunk.setColumns(pk_block.getColumns(), 1); + // std::cerr << "++++" << pk_block.dumpStructure() << std::endl; - // Columns ordered_columns; - // ordered_columns.reserve(pk_block.columns()); + Columns ordered_columns; + ordered_columns.reserve(pk_block.columns()); - // for (size_t i = 0; i < header.columns(); ++i) - // { - // const ColumnWithTypeAndName & type_and_name = header.getByPosition(i); - // ColumnPtr current_column = type_and_name.type->createColumn(); + for (size_t i = 0; i < header.columns(); ++i) + { + const ColumnWithTypeAndName & col = header.getByPosition(i); + if (const auto * pk_col = pk_block.findByName(col.name)) + { + if (!col.type->equals(*pk_col->type)) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Virtual row has different tupe for {}. 
Expected {}, got {}", + col.name, col.dumpStructure(), pk_col->dumpStructure()); - // size_t pos = type_and_name.name.find_last_of('.'); - // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + ordered_columns.push_back(pk_col->column); + } + else + ordered_columns.push_back(col.type->createColumnConstWithDefaultValue(1)); - // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); - // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); - // } + // ColumnPtr current_column = type_and_name.type->createColumn(); - // chunk.setColumns(ordered_columns, 1); + // size_t pos = type_and_name.name.find_last_of('.'); + // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); + + // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); + // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); + } + + chunk.setColumns(ordered_columns, 1); } } diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 0dd95729ba3..011f713744b 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -62,8 +62,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!isVirtualRow(input.chunk)) continue; - setVirtualRow(input.chunk, apply_virtual_row_conversions); - input.skip_last_row = true; + setVirtualRow(input.chunk, header, apply_virtual_row_conversions); } removeConstAndSparse(inputs); diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 49ce9a0280d..5396cced6c1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -339,20 +339,20 @@ void enrichFixedColumns(const ActionsDAG & dag, FixedColumns & fixed_columns) } } -static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match) +static const ActionsDAG::Node * addMonotonicChain(ActionsDAG & dag, const ActionsDAG::Node * node, const MatchedTrees::Match * match, const std::string & input_name) { if (!match->monotonicity) - return &dag.addInput(node->result_name, node->result_type); + return &dag.addInput(input_name, node->result_type); if (node->type == ActionsDAG::ActionType::ALIAS) - return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match), node->result_name); + return &dag.addAlias(*addMonotonicChain(dag, node->children.front(), match, input_name), node->result_name); ActionsDAG::NodeRawConstPtrs args; args.reserve(node->children.size()); for (const auto * child : node->children) { if (child == match->monotonicity->child_node) - args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match)); + args.push_back(addMonotonicChain(dag, match->monotonicity->child_node, match->monotonicity->child_match, input_name)); else args.push_back(&dag.addColumn({child->column, child->result_type, child->result_name})); } @@ -571,15 +571,25 @@ SortingInputOrder buildInputOrderInfo( { ActionsDAG virtual_row_dag; virtual_row_dag.getOutputs().reserve(match_infos.size()); + size_t next_pk_name = 0; for (const auto & info : match_infos) { const ActionsDAG::Node * output; if 
(info.fixed_column) output = &virtual_row_dag.addColumn({info.fixed_column->column, info.fixed_column->result_type, info.fixed_column->result_name}); - else if (info.monotonic) - output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic); else - output = &virtual_row_dag.addInput(info.source->result_name, info.source->result_type); + { + if (info.monotonic) + output = addMonotonicChain(virtual_row_dag, info.source, info.monotonic, pk_column_names[next_pk_name]); + else + { + output = &virtual_row_dag.addInput(pk_column_names[next_pk_name], info.source->result_type); + if (pk_column_names[next_pk_name] != info.source->result_name) + output = &virtual_row_dag.addAlias(*output, info.source->result_name); + } + + ++next_pk_name; + } virtual_row_dag.getOutputs().push_back(output); } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 38f018d34ee..c6fc924d7a7 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -701,9 +701,10 @@ Pipe ReadFromMergeTree::readInOrder( size_t mark_range_begin = part_with_ranges.ranges.front().begin; ColumnsWithTypeAndName pk_columns; - pk_columns.reserve(index->size()); + size_t num_columns = virtual_row_conversion->getSampleBlock().columns(); + pk_columns.reserve(num_columns); - for (size_t j = 0; j < index->size(); ++j) + for (size_t j = 0; j < num_columns; ++j) { auto column = primary_key.data_types[j]->createColumn()->cloneEmpty(); column->insert((*(*index)[j])[mark_range_begin]); From d5c0c499df1c80d94c3248394f8d8e271003d8fc Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 16:01:47 +0000 Subject: [PATCH 050/171] Fix PK size. --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index c6fc924d7a7..b16a460ec68 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -701,7 +701,7 @@ Pipe ReadFromMergeTree::readInOrder( size_t mark_range_begin = part_with_ranges.ranges.front().begin; ColumnsWithTypeAndName pk_columns; - size_t num_columns = virtual_row_conversion->getSampleBlock().columns(); + size_t num_columns = virtual_row_conversion->getRequiredColumnsWithTypes().size(); pk_columns.reserve(num_columns); for (size_t j = 0; j < num_columns; ++j) From d6b444dac9328ea0b64fda1005f78e2164fbab1b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 26 Sep 2024 16:12:18 +0000 Subject: [PATCH 051/171] Skip virtual row chunk by skipping last row. 
--- .../Algorithms/MergingSortedAlgorithm.cpp | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 011f713744b..331b67066be 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -63,6 +63,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) continue; setVirtualRow(input.chunk, header, apply_virtual_row_conversions); + input.skip_last_row = true; } removeConstAndSparse(inputs); @@ -149,8 +150,8 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - if (isVirtualRow(current_inputs[current.impl->order].chunk)) - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); + // if (isVirtualRow(current_inputs[current.impl->order].chunk)) + // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) { @@ -248,14 +249,14 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - if (isVirtualRow(current_inputs[current.impl->order].chunk)) - { - /// If virtual row is detected, there should be only one row as a single chunk, - /// and always skip this chunk to pull the next one. - chassert(initial_batch_size == 1); - queue.removeTop(); - return Status(current.impl->order); - } + // if (isVirtualRow(current_inputs[current.impl->order].chunk)) + // { + // /// If virtual row is detected, there should be only one row as a single chunk, + // /// and always skip this chunk to pull the next one. + // chassert(initial_batch_size == 1); + // queue.removeTop(); + // return Status(current.impl->order); + // } bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) From fb0b46adbf40dfbf7feaf36796c88d8e82da6633 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 27 Sep 2024 09:24:54 +0000 Subject: [PATCH 052/171] DIsable virtual row for FINAL. --- src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index b16a460ec68..37a159dd865 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1890,7 +1890,8 @@ bool ReadFromMergeTree::requestReadingInOrder(size_t prefix_size, int direction, /// Let prefer in-order optimization over vertical FINAL for now enable_vertical_final = false; - if (virtual_row_conversion_) + /// Disable virtual row for FINAL. + if (virtual_row_conversion_ && !isQueryWithFinal()) virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); return true; From 63c89ded04c36c572e9280c356a4bf5570c65bf7 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 27 Sep 2024 10:56:28 +0000 Subject: [PATCH 053/171] Fixing other tests. 
--- .../Optimizations/optimizeReadInOrder.cpp | 2 +- .../01786_explain_merge_tree.reference | 14 ++++++++++++++ .../02149_read_in_order_fixed_prefix.reference | 16 ++++++---------- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 5396cced6c1..cac4cf69054 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -515,7 +515,7 @@ SortingInputOrder buildInputOrderInfo( // 1st part (0, 100), (1, 2), (1, 3), (1, 4) // 2nd part (0, 100), (1, 2), (1, 3), (1, 4). - can_optimize_virtual_row = true; + can_optimize_virtual_row = false; } //std::cerr << "+++++++++ Found fixed key by match" << std::endl; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 3a015d32539..f02dbcb59c9 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -86,11 +86,17 @@ ReadType: InOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT :: 0 -> x UInt32 : 0 + Positions: 0 ----------------- ReadFromMergeTree (default.test_index) ReadType: InReverseOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT :: 0 -> x UInt32 : 0 + Positions: 0 ReadFromMergeTree (default.idx) Indexes: PrimaryKey @@ -174,11 +180,19 @@ ReadType: InOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT : 0 -> x UInt32 : 0 + ALIAS x :: 0 -> __table1.x UInt32 : 1 + Positions: 1 ----------------- ReadFromMergeTree (default.test_index) ReadType: InReverseOrder Parts: 1 Granules: 3 + Virtual row conversions + Actions: INPUT : 0 -> x UInt32 : 0 + ALIAS x :: 0 -> __table1.x UInt32 : 1 + Positions: 1 ReadFromMergeTree (default.idx) Indexes: PrimaryKey diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference index 31462988c2d..cb96a7167da 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.reference @@ -14,10 +14,7 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 2020-10-01 9 2020-10-01 9 2020-10-01 9 @@ -54,10 +51,7 @@ ExpressionTransform (Expression) ExpressionTransform × 2 (ReadFromMergeTree) - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 2020-10-11 0 2020-10-11 0 2020-10-11 0 @@ -178,7 +172,8 @@ ExpressionTransform (Expression) ExpressionTransform (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 @@ -192,7 +187,8 @@ ExpressionTransform (Expression) ExpressionTransform (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: 
InOrder) 0 → 1 + VirtualRowTransform + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 2020-10-10 00:00:00 0.01 From aaabaadf5650f37507ba07b7eca3327b4a41db95 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Sat, 28 Sep 2024 14:15:43 +0000 Subject: [PATCH 054/171] cleanup --- .../Merges/Algorithms/MergeTreeReadInfo.h | 10 +- .../Algorithms/MergingSortedAlgorithm.cpp | 17 --- .../QueryPlan/ReadFromMergeTree.cpp | 25 ---- .../02521_aggregation_by_partitions.reference | 112 +++++------------- 4 files changed, 33 insertions(+), 131 deletions(-) diff --git a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h index a4baaca215b..253d008c21d 100644 --- a/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h +++ b/src/Processors/Merges/Algorithms/MergeTreeReadInfo.h @@ -71,21 +71,13 @@ inline void setVirtualRow(Chunk & chunk, const Block & header, bool apply_virtua { if (!col.type->equals(*pk_col->type)) throw Exception(ErrorCodes::LOGICAL_ERROR, - "Virtual row has different tupe for {}. Expected {}, got {}", + "Virtual row has different type for {}. Expected {}, got {}", col.name, col.dumpStructure(), pk_col->dumpStructure()); ordered_columns.push_back(pk_col->column); } else ordered_columns.push_back(col.type->createColumnConstWithDefaultValue(1)); - - // ColumnPtr current_column = type_and_name.type->createColumn(); - - // size_t pos = type_and_name.name.find_last_of('.'); - // String column_name = (pos == String::npos) ? type_and_name.name : type_and_name.name.substr(pos + 1); - - // const ColumnWithTypeAndName * column = pk_block.findByName(column_name, true); - // ordered_columns.push_back(column ? column->column : current_column->cloneResized(1)); } chunk.setColumns(ordered_columns, 1); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 331b67066be..f2ebf9053ea 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -8,11 +8,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int NOT_IMPLEMENTED; -} - MergingSortedAlgorithm::MergingSortedAlgorithm( Block header_, size_t num_inputs, @@ -150,9 +145,6 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeImpl(TSortingHeap & queue auto current = queue.current(); - // if (isVirtualRow(current_inputs[current.impl->order].chunk)) - // throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Virtual row is not implemented for Non-batch mode."); - if (current.impl->isLast() && current_inputs[current.impl->order].skip_last_row) { /// Get the next block from the corresponding source, if there is one. @@ -249,15 +241,6 @@ IMergingAlgorithm::Status MergingSortedAlgorithm::mergeBatchImpl(TSortingQueue & auto [current_ptr, initial_batch_size] = queue.current(); auto current = *current_ptr; - // if (isVirtualRow(current_inputs[current.impl->order].chunk)) - // { - // /// If virtual row is detected, there should be only one row as a single chunk, - // /// and always skip this chunk to pull the next one. 
- // chassert(initial_batch_size == 1); - // queue.removeTop(); - // return Status(current.impl->order); - // } - bool batch_skip_last_row = false; if (current.impl->isLast(initial_batch_size) && current_inputs[current.impl->order].skip_last_row) { diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index e90d6165aa3..6622662d0a0 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1133,33 +1133,8 @@ Pipe ReadFromMergeTree::spreadMarkRangesAmongStreamsWithOrder( splitted_parts_and_ranges.emplace_back(std::move(new_parts)); } - // bool primary_key_type_supports_virtual_row = true; - // const auto & actions = storage_snapshot->metadata->getPrimaryKey().expression->getActions(); - // for (const auto & action : actions) - // { - // if (action.node->type != ActionsDAG::ActionType::INPUT) - // { - // primary_key_type_supports_virtual_row = false; - // break; - // } - // } - - // /// If possible in the optimization stage, check whether there are more than one branch. - // if (virtual_row_status == VirtualRowStatus::Possible) - // virtual_row_status = splitted_parts_and_ranges.size() > 1 - // || (splitted_parts_and_ranges.size() == 1 && splitted_parts_and_ranges[0].size() > 1) - // ? VirtualRowStatus::Yes : VirtualRowStatus::NoConsiderInLogicalPlan; - for (auto && item : splitted_parts_and_ranges) - { - // bool enable_current_virtual_row = false; - // if (virtual_row_status == VirtualRowStatus::Yes) - // enable_current_virtual_row = true; - // else if (virtual_row_status == VirtualRowStatus::NoConsiderInLogicalPlan) - // enable_current_virtual_row = (need_preliminary_merge || output_each_partition_through_separate_port) && item.size() > 1; - pipes.emplace_back(readInOrder(std::move(item), column_names, pool_settings, read_type, input_order_info->limit)); - } } Block pipe_header; diff --git a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference index addc36421c3..87b2d5c3430 100644 --- a/tests/queries/0_stateless/02521_aggregation_by_partitions.reference +++ b/tests/queries/0_stateless/02521_aggregation_by_partitions.reference @@ -160,100 +160,52 @@ ExpressionTransform × 16 (ReadFromMergeTree) MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 MergingSortedTransform 2 → 1 ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, 
algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - MergingSortedTransform 2 → 1 - ExpressionTransform × 2 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 - VirtualRowTransform - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 + MergingSortedTransform 2 → 1 + ExpressionTransform × 2 + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) × 2 0 → 1 1000000 Skip merging: 1 Skip merging: 1 From 93620886f689d3b6ed59a9a36539c2902158e6b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 6 Oct 2024 22:16:06 +0300 Subject: [PATCH 055/171] Revert part actual name to pass the check --- src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index 3f5c70adb64..4a994bc38e2 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -844,8 +844,9 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: } } - /// Save the current temporary path in case we need to revert the change to retry (ZK connection loss) + /// Save the current temporary path and name in case we need to revert the change to retry (ZK connection loss) or in case part is deduplicated. const String temporary_part_relative_path = part->getDataPartStorage().getPartDirectory(); + const String initial_part_name = part->name; /// Obtain incremental block number and lock it. The lock holds our intention to add the block to the filesystem. /// We remove the lock just after renaming the part. In case of exception, block number will be marked as abandoned. @@ -1024,6 +1025,7 @@ std::pair, bool> ReplicatedMergeTreeSinkImpl:: transaction.rollbackPartsToTemporaryState(); part->is_temp = true; + part->setName(initial_part_name); part->renameTo(temporary_part_relative_path, false); if constexpr (async_insert) From 91931b5b3cccabdc94231a8a07ad4d2e8de8d8b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9A=D0=B8=D1=80=D0=B8=D0=BB=D0=BB=20=D0=93=D0=B0=D1=80?= =?UTF-8?q?=D0=B1=D0=B0=D1=80?= Date: Sun, 6 Oct 2024 22:56:48 +0300 Subject: [PATCH 056/171] Fix style --- tests/integration/test_deduplicated_attached_part_rename/test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/integration/test_deduplicated_attached_part_rename/test.py b/tests/integration/test_deduplicated_attached_part_rename/test.py index 7afd85c62dc..02fa2c9d4a3 100644 --- a/tests/integration/test_deduplicated_attached_part_rename/test.py +++ b/tests/integration/test_deduplicated_attached_part_rename/test.py @@ -1,4 +1,5 @@ import pytest + from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) From 04f68594dcf3dccc5eaecd542e00073af39777d9 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Tue, 29 Oct 2024 21:36:43 +0800 Subject: [PATCH 057/171] Print method in clickhouse-compressor --stat. 
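Previously --stat printed only the decompressed and compressed sizes of each block; with this change every line is prefixed with the codec name decoded from the first byte of the block header. As an editor's illustration (not part of the patch itself: the enum below lists only a few codec bytes, and the exact values are assumptions to be checked against src/Compression/CompressionInfo.h):

    // Sketch: map a compressed-block method byte to a printable codec name.
    // MAGIC_ENUM_RANGE_MAX must exceed 127, because codec bytes such as 0x82 (LZ4)
    // fall outside magic_enum's default probing range; the next "Fix" commit in
    // this series defines it for exactly this reason.
    #define MAGIC_ENUM_RANGE_MAX 256
    #include <cstdint>
    #include <string>
    #include <magic_enum.hpp>

    enum class CompressionMethodByte : uint8_t
    {
        NONE = 0x02,      // values assumed for illustration only
        LZ4 = 0x82,
        ZSTD = 0x90,
        Multiple = 0x91,
    };

    std::string codecName(uint8_t method_byte)
    {
        // enum_cast returns std::nullopt for bytes that do not name a known codec.
        auto method = magic_enum::enum_cast<CompressionMethodByte>(method_byte);
        if (method)
            return std::string(magic_enum::enum_name(*method));
        return "UNKNOWN(" + std::to_string(method_byte) + ")";
    }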
--- programs/compressor/Compressor.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 819f16cfd64..fc07a0adc66 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -33,12 +33,12 @@ namespace DB namespace { -/// Outputs sizes of uncompressed and compressed blocks for compressed file. +/// Outputs the method and the sizes of uncompressed and compressed blocks for a compressed file. void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) { while (!in.eof()) { - in.ignore(16); /// checksum + in.ignore(16); /// checksum char header[COMPRESSED_BLOCK_HEADER_SIZE]; in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE); @@ -50,6 +50,13 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]); + auto method_byte = static_cast<uint8_t>(header[0]); + auto method = magic_enum::enum_cast<DB::CompressionMethodByte>(method_byte); + if (method) + DB::writeText(magic_enum::enum_name(*method), out); + else + DB::writeText(fmt::format("UNKNOWN({})", method_byte), out); + DB::writeChar('\t', out); DB::writeText(size_decompressed, out); DB::writeChar('\t', out); DB::writeText(size_compressed, out); From c5d6acf5e3ff24122518abb992e78c73954f8703 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 09:00:18 +0800 Subject: [PATCH 058/171] Fix --- programs/compressor/Compressor.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index fc07a0adc66..69936912d49 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -1,3 +1,6 @@ +/// For magic_enum to properly get enum name of DB::CompressionMethodByte +#define MAGIC_ENUM_RANGE_MAX 256 + #include #include #include From 10ee24d9a0c749624b86840f78b2bd3bbdb221d9 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 09:41:18 +0800 Subject: [PATCH 059/171] Fix multiple codecs and add test --- programs/compressor/Compressor.cpp | 26 +++++----------- .../getCompressionCodecForFile.cpp | 31 ++++++++++++++----- src/Compression/getCompressionCodecForFile.h | 4 +++ .../03260_compressor_stat.reference | 1 + .../0_stateless/03260_compressor_stat.sh | 13 ++++++++ 5 files changed, 49 insertions(+), 26 deletions(-) create mode 100644 tests/queries/0_stateless/03260_compressor_stat.reference create mode 100755 tests/queries/0_stateless/03260_compressor_stat.sh diff --git a/programs/compressor/Compressor.cpp b/programs/compressor/Compressor.cpp index 69936912d49..7bb434d40a8 100644 --- a/programs/compressor/Compressor.cpp +++ b/programs/compressor/Compressor.cpp @@ -1,6 +1,3 @@ -/// For magic_enum to properly get enum name of DB::CompressionMethodByte -#define MAGIC_ENUM_RANGE_MAX 256 - #include #include #include @@ -14,9 +11,12 @@ #include #include #include +#include +#include #include #include #include +#include #include #include #include @@ -41,31 +41,19 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out) { while (!in.eof()) { - in.ignore(16); /// checksum - - char header[COMPRESSED_BLOCK_HEADER_SIZE]; - in.readStrict(header, COMPRESSED_BLOCK_HEADER_SIZE); - - UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]); + UInt32 size_compressed; + UInt32 size_decompressed; + auto codec = DB::getCompressionCodecForFile(in, size_compressed, size_decompressed, true /* skip_to_next_block */); if (size_compressed > DBMS_MAX_COMPRESSED_SIZE) throw
DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data."); - UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]); - - auto method_byte = static_cast<uint8_t>(header[0]); - auto method = magic_enum::enum_cast<DB::CompressionMethodByte>(method_byte); - if (method) - DB::writeText(magic_enum::enum_name(*method), out); - else - DB::writeText(fmt::format("UNKNOWN({})", method_byte), out); + DB::writeText(queryToString(codec->getFullCodecDesc()), out); DB::writeChar('\t', out); DB::writeText(size_decompressed, out); DB::writeChar('\t', out); DB::writeText(size_compressed, out); DB::writeChar('\n', out); - - in.ignore(size_compressed - COMPRESSED_BLOCK_HEADER_SIZE); } } diff --git a/src/Compression/getCompressionCodecForFile.cpp b/src/Compression/getCompressionCodecForFile.cpp index 027ee0ac57a..b04e4b6371a 100644 --- a/src/Compression/getCompressionCodecForFile.cpp +++ b/src/Compression/getCompressionCodecForFile.cpp @@ -10,33 +10,50 @@ namespace DB { - using Checksum = CityHash_v1_0_2::uint128; -CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) +CompressionCodecPtr +getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block) { - auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); - read_buffer->ignore(sizeof(Checksum)); + read_buffer.ignore(sizeof(Checksum)); UInt8 header_size = ICompressionCodec::getHeaderSize(); + size_t starting_bytes = read_buffer.count(); PODArray<char> compressed_buffer; compressed_buffer.resize(header_size); - read_buffer->readStrict(compressed_buffer.data(), header_size); + read_buffer.readStrict(compressed_buffer.data(), header_size); uint8_t method = ICompressionCodec::readMethod(compressed_buffer.data()); + size_compressed = unalignedLoad<UInt32>(&compressed_buffer[1]); + size_decompressed = unalignedLoad<UInt32>(&compressed_buffer[5]); if (method == static_cast<uint8_t>(CompressionMethodByte::Multiple)) { compressed_buffer.resize(1); - read_buffer->readStrict(compressed_buffer.data(), 1); + read_buffer.readStrict(compressed_buffer.data(), 1); compressed_buffer.resize(1 + compressed_buffer[0]); - read_buffer->readStrict(compressed_buffer.data() + 1, compressed_buffer[0]); + read_buffer.readStrict(compressed_buffer.data() + 1, compressed_buffer[0]); auto codecs_bytes = CompressionCodecMultiple::getCodecsBytesFromData(compressed_buffer.data()); Codecs codecs; for (auto byte : codecs_bytes) codecs.push_back(CompressionCodecFactory::instance().get(byte)); + if (skip_to_next_block) + read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes)); + return std::make_shared<CompressionCodecMultiple>(codecs); } + + if (skip_to_next_block) + read_buffer.ignore(size_compressed - (read_buffer.count() - starting_bytes)); + return CompressionCodecFactory::instance().get(method); } +CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path) +{ + auto read_buffer = data_part_storage.readFile(relative_path, {}, std::nullopt, std::nullopt); + UInt32 size_compressed; + UInt32 size_decompressed; + return getCompressionCodecForFile(*read_buffer, size_compressed, size_decompressed, false); +} + } diff --git a/src/Compression/getCompressionCodecForFile.h b/src/Compression/getCompressionCodecForFile.h index b6f22750e4d..535befa37e1 100644 --- a/src/Compression/getCompressionCodecForFile.h +++ b/src/Compression/getCompressionCodecForFile.h @@ -13,4 +13,8 @@ namespace
DB /// from metadata. CompressionCodecPtr getCompressionCodecForFile(const IDataPartStorage & data_part_storage, const String & relative_path); +/// Same as above; used by clickhouse-compressor to print compression statistics for each data block. +CompressionCodecPtr +getCompressionCodecForFile(ReadBuffer & read_buffer, UInt32 & size_compressed, UInt32 & size_decompressed, bool skip_to_next_block); + } diff --git a/tests/queries/0_stateless/03260_compressor_stat.reference b/tests/queries/0_stateless/03260_compressor_stat.reference new file mode 100644 index 00000000000..ba84b26cc48 --- /dev/null +++ b/tests/queries/0_stateless/03260_compressor_stat.reference @@ -0,0 +1 @@ +CODEC(Delta(1), LZ4) 14 48 diff --git a/tests/queries/0_stateless/03260_compressor_stat.sh b/tests/queries/0_stateless/03260_compressor_stat.sh new file mode 100755 index 00000000000..6efa7b6ee0a --- /dev/null +++ b/tests/queries/0_stateless/03260_compressor_stat.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +echo "Hello, World!" > 03260_test_data + +$CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' --input '03260_test_data' --output '03260_test_out' + +$CLICKHOUSE_COMPRESSOR --stat '03260_test_out' + +rm -f 03260_test_data 03260_test_out From bd9cfaecea93dc3b6d469f3898fcde9506ae5f9b Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 30 Oct 2024 14:35:06 +0800 Subject: [PATCH 060/171] No need to create tmp files --- tests/queries/0_stateless/03260_compressor_stat.sh | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/queries/0_stateless/03260_compressor_stat.sh b/tests/queries/0_stateless/03260_compressor_stat.sh index 6efa7b6ee0a..8a03541763c 100755 --- a/tests/queries/0_stateless/03260_compressor_stat.sh +++ b/tests/queries/0_stateless/03260_compressor_stat.sh @@ -4,10 +4,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "Hello, World!" > 03260_test_data - -$CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' --input '03260_test_data' --output '03260_test_out' - -$CLICKHOUSE_COMPRESSOR --stat '03260_test_out' - -rm -f 03260_test_data 03260_test_out +echo "Hello, World!" | $CLICKHOUSE_COMPRESSOR --codec 'Delta' --codec 'LZ4' | $CLICKHOUSE_COMPRESSOR --stat From e3890a9de103a560a9804dc4b1fb63c0eb68a569 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 30 Oct 2024 11:12:21 +0000 Subject: [PATCH 061/171] Disable virtual row better. --- .../QueryPlan/Optimizations/applyOrder.cpp | 2 +- .../Optimizations/optimizeReadInOrder.cpp | 16 ++++++++++------ src/Processors/QueryPlan/SortingStep.cpp | 8 ++++++-- src/Processors/QueryPlan/SortingStep.h | 3 ++- 4 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/applyOrder.cpp b/src/Processors/QueryPlan/Optimizations/applyOrder.cpp index 8695f29c26b..51a5aa099ac 100644 --- a/src/Processors/QueryPlan/Optimizations/applyOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/applyOrder.cpp @@ -124,7 +124,7 @@ SortingProperty applyOrder(QueryPlan::Node * parent, SortingProperty * propertie auto common_prefix = commonPrefix(properties->sort_description, sorting_step->getSortDescription()); if (!common_prefix.empty()) /// Buffering is useful for reading from MergeTree, and it is applied in optimizeReadInOrder only.
- sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false); + sorting_step->convertToFinishSorting(common_prefix, /*use_buffering*/ false, false); } auto scope = sorting_step->hasPartitions() ? SortingProperty::SortScope::Stream : SortingProperty::SortScope::Global; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp index 7d9e1a7c5f7..9cb9db8eebe 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeReadInOrder.cpp @@ -899,7 +899,7 @@ InputOrder buildInputOrderFromUnorderedKeys( return order_info; } -InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & node) +InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, bool & apply_virtual_row, QueryPlan::Node & node) { QueryPlan::Node * reading_node = findReadingStep(node, /*allow_existing_order=*/ false); if (!reading_node) @@ -925,6 +925,8 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n if (order_info.input_order) { + apply_virtual_row = order_info.virtual_row_conversion != std::nullopt; + bool can_read = reading->requestReadingInOrder( order_info.input_order->used_prefix_of_sorting_key_size, order_info.input_order->direction, @@ -1128,6 +1130,8 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) if (sorting->getType() != SortingStep::Type::Full) return; + bool apply_virtual_row = false; + if (typeid_cast(node.children.front()->step.get())) { auto & union_node = node.children.front(); @@ -1150,7 +1154,7 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) for (auto * child : union_node->children) { - infos.push_back(buildInputOrderInfo(*sorting, *child)); + infos.push_back(buildInputOrderInfo(*sorting, apply_virtual_row, *child)); if (infos.back()) { @@ -1202,13 +1206,13 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes) } } - sorting->convertToFinishSorting(*max_sort_descr, use_buffering); + sorting->convertToFinishSorting(*max_sort_descr, use_buffering, false); } - else if (auto order_info = buildInputOrderInfo(*sorting, *node.children.front())) + else if (auto order_info = buildInputOrderInfo(*sorting, apply_virtual_row, *node.children.front())) { /// Use buffering only if have filter or don't have limit. 
bool use_buffering = order_info->limit == 0; - sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering); + sorting->convertToFinishSorting(order_info->sort_description_for_merging, use_buffering, apply_virtual_row); } } @@ -1350,7 +1354,7 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, bool can_read = read_from_merge_tree->requestReadingInOrder(order_info->used_prefix_of_sorting_key_size, order_info->direction, order_info->limit, {}); if (!can_read) return 0; - sorting->convertToFinishSorting(order_info->sort_description_for_merging, false); + sorting->convertToFinishSorting(order_info->sort_description_for_merging, false, false); } return 0; diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..5f0e54faf18 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -147,11 +147,12 @@ void SortingStep::updateLimit(size_t limit_) } } -void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_) +void SortingStep::convertToFinishSorting(SortDescription prefix_description_, bool use_buffering_, bool apply_virtual_row_conversions_) { type = Type::FinishSorting; prefix_description = std::move(prefix_description_); use_buffering = use_buffering_; + apply_virtual_row_conversions = apply_virtual_row_conversions_; } void SortingStep::scatterByPartitionIfNeeded(QueryPipelineBuilder& pipeline) @@ -255,7 +256,10 @@ void SortingStep::mergingSorted(QueryPipelineBuilder & pipeline, const SortDescr /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, limit_, - always_read_till_end); + always_read_till_end, + nullptr, + false, + apply_virtual_row_conversions); pipeline.addTransform(std::move(transform)); } diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 6cdf626d4c8..9366630f0fb 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -81,7 +81,7 @@ public: bool hasPartitions() const { return !partition_by_description.empty(); } - void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_); + void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_, bool apply_virtual_row_conversions_); Type getType() const { return type; } const Settings & getSettings() const { return sort_settings; } @@ -128,6 +128,7 @@ private: UInt64 limit; bool always_read_till_end = false; bool use_buffering = false; + bool apply_virtual_row_conversions = false; Settings sort_settings; }; From 4e2693bb466a07ab06d4155a091e6782a495ed45 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 02:01:23 +0000 Subject: [PATCH 062/171] add test --- ...in_order_optimization_with_virtual_row.sql | 5 ++--- ...ization_with_virtual_row_special.reference | 2 ++ ..._optimization_with_virtual_row_special.sql | 21 +++++++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference create mode 100644 tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index f66b4be2c69..8826f2c27cf 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql 
+++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -156,7 +156,6 @@ read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge DROP TABLE fixed_prefix; SELECT '========'; --- currently don't support virtual row in this case DROP TABLE IF EXISTS function_pk; CREATE TABLE function_pk @@ -179,7 +178,7 @@ ORDER BY (A,-B) ASC limit 3 SETTINGS max_threads = 1, optimize_read_in_order = 1, -read_in_order_two_level_merge_threshold = 0; --force preliminary merge +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge DROP TABLE function_pk; @@ -214,4 +213,4 @@ SETTINGS read_in_order_two_level_merge_threshold = 0, optimize_read_in_order = 1, max_threads = 2; -DROP TABLE distinct_in_order; +DROP TABLE distinct_in_order; \ No newline at end of file diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference new file mode 100644 index 00000000000..b03759364cf --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.reference @@ -0,0 +1,2 @@ +dist +src diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql new file mode 100644 index 00000000000..ee7336bdf02 --- /dev/null +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +-- modified from test_01155_ordinary +DROP DATABASE IF EXISTS test_01155_ordinary; + +SET allow_deprecated_database_ordinary = 1; + +CREATE DATABASE test_01155_ordinary ENGINE = Ordinary; + +USE test_01155_ordinary; + +CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; +INSERT INTO src(s) VALUES ('before moving tables'); +CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_01155_ordinary, src); + +SET enable_analyzer=0; +SELECT _table FROM merge('test_01155_ordinary', '') ORDER BY _table, s; + +DROP TABLE src; +DROP TABLE dist; +DROP DATABASE test_01155_ordinary; \ No newline at end of file From fa5010ba181f7251ebcf9ce09ade01c48fdcdebc Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 14:20:47 +0000 Subject: [PATCH 063/171] fix test --- ...der_optimization_with_virtual_row_special.sql | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql index ee7336bdf02..3d6f9ad391b 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -1,21 +1,19 @@ -- Tags: no-parallel --- modified from test_01155_ordinary -DROP DATABASE IF EXISTS test_01155_ordinary; +-- modified from test_01155_ordinary, to test special optimization path for virtual row +DROP DATABASE IF EXISTS test_03031; -SET allow_deprecated_database_ordinary = 1; +CREATE DATABASE test_03031; -CREATE DATABASE test_01155_ordinary ENGINE = Ordinary; - -USE test_01155_ordinary; +USE test_03031; CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; INSERT INTO src(s) VALUES ('before moving tables'); -CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, 
test_01155_ordinary, src); +CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src); SET enable_analyzer=0; -SELECT _table FROM merge('test_01155_ordinary', '') ORDER BY _table, s; +SELECT _table FROM merge('test_03031', '') ORDER BY _table, s; DROP TABLE src; DROP TABLE dist; -DROP DATABASE test_01155_ordinary; \ No newline at end of file +DROP DATABASE test_03031; \ No newline at end of file From 77298ef479befda70073216255658f656bf5fba5 Mon Sep 17 00:00:00 2001 From: jsc0218 Date: Thu, 31 Oct 2024 18:23:06 +0000 Subject: [PATCH 064/171] add setting --- src/Core/Settings.cpp | 3 +++ src/Core/SettingsChangesHistory.cpp | 1 + src/Processors/QueryPlan/ReadFromMergeTree.cpp | 3 ++- tests/queries/0_stateless/01786_explain_merge_tree.sh | 2 +- tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql | 1 + .../03031_read_in_order_optimization_with_virtual_row.sql | 2 ++ ...031_read_in_order_optimization_with_virtual_row_explain.sql | 2 +- ...031_read_in_order_optimization_with_virtual_row_special.sql | 2 ++ 8 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 0aecb7cf941..37646dc86cb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2863,6 +2863,9 @@ Possible values: **See Also** - [ORDER BY Clause](../../sql-reference/statements/select/order-by.md/#optimize_read_in_order) )", 0) \ + DECLARE(Bool, read_in_order_use_virtual_row, false, R"( +Use a virtual row while reading in the order of the primary key or a monotonic function of it. It is useful when searching over multiple parts, since only the relevant ones are touched. )", 0) \ DECLARE(Bool, optimize_read_in_window_order, true, R"( Enable ORDER BY optimization in window clause for reading data in corresponding order in MergeTree tables.
diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 88d39d6d393..4b014e141ac 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,7 @@ static std::initializer_listgetSettingsRef()[Setting::read_in_order_use_virtual_row]) virtual_row_conversion = std::make_shared(std::move(*virtual_row_conversion_)); updateSortDescription(); diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.sh b/tests/queries/0_stateless/01786_explain_merge_tree.sh index 828012f56bc..9fb764dcd38 100755 --- a/tests/queries/0_stateless/01786_explain_merge_tree.sh +++ b/tests/queries/0_stateless/01786_explain_merge_tree.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for i in $(seq 0 1) do - CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --enable_analyzer=$i" + CH_CLIENT="$CLICKHOUSE_CLIENT --optimize_move_to_prewhere=1 --convert_query_to_cnf=0 --optimize_read_in_order=1 --read_in_order_use_virtual_row=1 --enable_analyzer=$i" $CH_CLIENT -q "drop table if exists test_index" $CH_CLIENT -q "drop table if exists idx" diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql index 7bbdecf5501..4cc05203b6a 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql @@ -2,6 +2,7 @@ SET max_threads=0; SET optimize_read_in_order=1; SET optimize_trivial_insert_select = 1; SET read_in_order_two_level_merge_threshold=100; +SET read_in_order_use_virtual_row = 1; DROP TABLE IF EXISTS t_read_in_order; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql index 8826f2c27cf..0f100287815 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row.sql @@ -1,4 +1,6 @@ +SET read_in_order_use_virtual_row = 1; + DROP TABLE IF EXISTS t; CREATE TABLE t diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql index 8cdcb4628ec..8e3f37b37b8 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_explain.sql @@ -1,6 +1,6 @@ -- Tags: no-random-merge-tree-settings, no-object-storage -SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000; +SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000, read_in_order_use_virtual_row = 1; DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql index 3d6f9ad391b..52aa71437db 100644 --- a/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql +++ b/tests/queries/0_stateless/03031_read_in_order_optimization_with_virtual_row_special.sql @@ -7,6 +7,8 @@ CREATE DATABASE test_03031; USE test_03031; +SET read_in_order_use_virtual_row = 1; + CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; INSERT INTO src(s) VALUES ('before moving 
tables'); CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src); From 52fe2f18b08fdaff1d93abf2730096676eb55228 Mon Sep 17 00:00:00 2001 From: Michael Stetsyuk Date: Fri, 1 Nov 2024 16:42:01 +0000 Subject: [PATCH 065/171] rm metadata_version znode creation from restarting thread --- .../ReplicatedMergeTreeRestartingThread.cpp | 25 ++++--------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index 93124e634bd..c73c9f6d048 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -31,6 +31,7 @@ namespace ErrorCodes extern const int REPLICA_IS_ALREADY_ACTIVE; extern const int REPLICA_STATUS_CHANGED; extern const int LOGICAL_ERROR; + extern const int SUPPORT_IS_DISABLED; } namespace FailPoints @@ -217,26 +218,10 @@ bool ReplicatedMergeTreeRestartingThread::tryStartup() } else { - /// Table was created before 20.4 and was never altered, - /// let's initialize replica metadata version from global metadata version. - - const String & zookeeper_path = storage.zookeeper_path, & replica_path = storage.replica_path; - - Coordination::Stat table_metadata_version_stat; - zookeeper->get(zookeeper_path + "/metadata", &table_metadata_version_stat); - - Coordination::Requests ops; - ops.emplace_back(zkutil::makeCheckRequest(zookeeper_path + "/metadata", table_metadata_version_stat.version)); - ops.emplace_back(zkutil::makeCreateRequest(replica_path + "/metadata_version", toString(table_metadata_version_stat.version), zkutil::CreateMode::Persistent)); - - Coordination::Responses res; - auto code = zookeeper->tryMulti(ops, res); - - if (code == Coordination::Error::ZBADVERSION) - throw Exception(ErrorCodes::REPLICA_STATUS_CHANGED, "Failed to initialize metadata_version " - "because table was concurrently altered, will retry"); - - zkutil::KeeperMultiException::check(code, ops, res); + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "It seems you have upgraded from a version earlier than 20.4 straight to one later than 24.10. " + "ClickHouse does not support upgrades that span more than a year. " + "Please update gradually (through intermediate versions)."); } storage.queue.removeCurrentPartsFromMutations(); From 157e1695d5f8d8dd0962f89a782317a5249ad8eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:02:57 +0100 Subject: [PATCH 066/171] Fix ExecuteScalarSubqueriesMatcher visiting join elements --- src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index d4da038c089..c80852e9ae7 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -63,7 +63,7 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr if (node->as<ASTSelectQuery>()) { /// Do not go to FROM, JOIN, UNION.
- if (child->as<ASTTableExpression>() || child->as<ASTSelectQuery>()) + if (child->as<ASTTableExpression>() || child->as<ASTSelectQuery>() || child->as<ASTArrayJoin>()) return false; } From b4a3f6d3709b87f5b1a30316b60f042fc1c0f2ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:11:33 +0100 Subject: [PATCH 067/171] Make sure to update table_join children properly --- src/Analyzer/JoinNode.cpp | 10 ++++++++-- src/Interpreters/QueryNormalizer.cpp | 6 ++++++ .../TimeSeries/PrometheusRemoteReadProtocol.cpp | 1 + 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/src/Analyzer/JoinNode.cpp b/src/Analyzer/JoinNode.cpp index bf99c014826..722c1e19b7e 100644 --- a/src/Analyzer/JoinNode.cpp +++ b/src/Analyzer/JoinNode.cpp @@ -48,9 +48,15 @@ ASTPtr JoinNode::toASTTableJoin() const auto join_expression_ast = children[join_expression_child_index]->toAST(); if (is_using_join_expression) - join_ast->using_expression_list = std::move(join_expression_ast); + { + join_ast->using_expression_list = join_expression_ast; + join_ast->children.push_back(join_ast->using_expression_list); + } else - join_ast->on_expression = std::move(join_expression_ast); + { + join_ast->on_expression = join_expression_ast; + join_ast->children.push_back(join_ast->on_expression); + } } return join_ast; diff --git a/src/Interpreters/QueryNormalizer.cpp b/src/Interpreters/QueryNormalizer.cpp index a8639906aad..bba30fb5194 100644 --- a/src/Interpreters/QueryNormalizer.cpp +++ b/src/Interpreters/QueryNormalizer.cpp @@ -161,7 +161,13 @@ void QueryNormalizer::visit(ASTTablesInSelectQueryElement & node, const ASTPtr & { auto & join = node.table_join->as<ASTTableJoin &>(); if (join.on_expression) + { + ASTPtr original_on_expression = join.on_expression; visit(join.on_expression, data); + if (join.on_expression != original_on_expression) + join.children = { join.on_expression }; + } + } } diff --git a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp index df0f6b8bc5c..b8a3b2911b9 100644 --- a/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp +++ b/src/Storages/TimeSeries/PrometheusRemoteReadProtocol.cpp @@ -245,6 +245,7 @@ namespace table_join->strictness = JoinStrictness::Semi; table_join->on_expression = makeASTFunction("equals", makeASTColumn(data_table_id, TimeSeriesColumnNames::ID), makeASTColumn(tags_table_id, TimeSeriesColumnNames::ID)); + table_join->children.push_back(table_join->on_expression); table->table_join = table_join; auto table_exp = std::make_shared<ASTTableExpression>(); From 35a0d08a32302247b3689e887e9a3b72bb9152e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:12:34 +0100 Subject: [PATCH 068/171] RewriteArrayExistsFunctionVisitor: Assert proper child on join expression --- .../RewriteArrayExistsFunctionVisitor.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp index 22ce91d8c67..60bac2fb7a3 100644 --- a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp +++ b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp @@ -20,21 +20,21 @@ void RewriteArrayExistsFunctionMatcher::visit(ASTPtr & ast, Data & data) if (join->using_expression_list) { auto * it = std::find(join->children.begin(), join->children.end(), join->using_expression_list); + if (it == join->children.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Could not find join->using_expression_list in '{}'", join->formatForLogging());
visit(join->using_expression_list, data); - - if (it && *it != join->using_expression_list) - *it = join->using_expression_list; + *it = join->using_expression_list; } if (join->on_expression) { auto * it = std::find(join->children.begin(), join->children.end(), join->on_expression); + if (it == join->children.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Could not find join->on_expression in '{}'", join->formatForLogging()); visit(join->on_expression, data); - - if (it && *it != join->on_expression) - *it = join->on_expression; + *it = join->on_expression; } } } From 389fdd80d36b5073698b87b7a7d24dcc4c6560bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:15:29 +0100 Subject: [PATCH 069/171] Add test for crasher --- ...ptimize_rewrite_array_exists_to_has_crash.reference | 0 ...3261_optimize_rewrite_array_exists_to_has_crash.sql | 10 ++++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference create mode 100644 tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql new file mode 100644 index 00000000000..5a54d86f339 --- /dev/null +++ b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/71382 +DROP TABLE IF EXISTS rewrite; +CREATE TABLE rewrite (c0 Int) ENGINE = Memory(); +SELECT 1 +FROM rewrite +INNER JOIN rewrite AS y ON ( + SELECT 1 +) +INNER JOIN rewrite AS z ON 1 +SETTINGS allow_experimental_analyzer=0, optimize_rewrite_array_exists_to_has=1; From 19422e75b0fbe7fbbe68bef98f10f22ee046db4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 4 Nov 2024 20:24:06 +0100 Subject: [PATCH 070/171] Style --- src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp index 60bac2fb7a3..2890357494d 100644 --- a/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp +++ b/src/Interpreters/RewriteArrayExistsFunctionVisitor.cpp @@ -6,6 +6,12 @@ namespace DB { + +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + void RewriteArrayExistsFunctionMatcher::visit(ASTPtr & ast, Data & data) { if (auto * func = ast->as<ASTFunction>()) From ead7630d04b5aab7ff28a0e99710a8b6ce17800c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 12:35:23 +0100 Subject: [PATCH 071/171] Allow ExecuteScalarSubqueriesVisitor on ARRAY JOIN --- .../ExecuteScalarSubqueriesVisitor.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index c80852e9ae7..943febf4b0e 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -63,10 +63,22 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr if (node->as<ASTSelectQuery>()) { /// Do not go to FROM,
JOIN, UNION. - if (child->as<ASTTableExpression>() || child->as<ASTSelectQuery>() || child->as<ASTArrayJoin>()) + if (child->as<ASTTableExpression>() || child->as<ASTSelectQuery>()) return false; } + if (auto tables = node->as<ASTTablesInSelectQueryElement>()) + { + /// Contrary to what's said in the code block above, ARRAY JOIN needs to resolve the subquery if possible + /// and assign an alias for 02367_optimize_trivial_count_with_array_join to pass. Otherwise it will fail in + /// ArrayJoinedColumnsVisitor (`No alias for non-trivial value in ARRAY JOIN: _a`) + /// This looks 100% like incomplete code working on top of a bug, but this code has already been made obsolete + /// by the new analyzer, so it's an inconvenience we can live with until we deprecate it. + if (child == tables->array_join) + return true; + return false; + } + return true; } From 996773b205121f55d6f066826dab95b38b49dbbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 12:39:36 +0100 Subject: [PATCH 072/171] Test with both analyzers --- .../03261_optimize_rewrite_array_exists_to_has_crash.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql index 5a54d86f339..e0018632be4 100644 --- a/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql +++ b/tests/queries/0_stateless/03261_optimize_rewrite_array_exists_to_has_crash.sql @@ -7,4 +7,4 @@ INNER JOIN rewrite AS y ON ( SELECT 1 ) INNER JOIN rewrite AS z ON 1 -SETTINGS allow_experimental_analyzer=0, optimize_rewrite_array_exists_to_has=1; +SETTINGS optimize_rewrite_array_exists_to_has=1; From ead7630d04b5aab7ff28a0e99710a8b6ce17800c Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 5 Nov 2024 14:28:28 +0100 Subject: [PATCH 073/171] Missing tests in several tests in 24.10 Added corner cases for tests for: to_utc_timestamp and from_utc_timestamp (more timezones, special timezones, epoch corner cases do not look right, raising a bug for that) arrayUnion (empty and big arrays) quantilesExactWeightedInterpolated (more data types) --- .../02812_from_to_utc_timestamp.reference | 5 +++ .../02812_from_to_utc_timestamp.sh | 8 +++- .../0_stateless/03224_arrayUnion.reference | 10 +++++ .../queries/0_stateless/03224_arrayUnion.sql | 21 ++++++++- ...tile_exact_weighted_interpolated.reference | 13 +++--- ...0_quantile_exact_weighted_interpolated.sql | 45 ++++++++++++++++--- 6 files changed, 88 insertions(+), 14 deletions(-) diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference index 4da8a9784dd..bdce849e069 100644 --- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference +++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference @@ -3,3 +3,8 @@ 3 2023-03-16 12:22:33 2023-03-16 10:22:33.000 2023-03-16 03:22:33 2023-03-16 19:22:33.123 2024-02-24 10:22:33 2024-02-24 12:22:33 2024-10-24 09:22:33 2024-10-24 13:22:33 +2024-10-24 16:22:33 2024-10-24 06:22:33 +leap year: 2024-02-29 16:22:33 2024-02-29 06:22:33 +non-leap year: 2023-03-01 16:22:33 2023-03-01 06:22:33 +timezone with half-hour offset: 2024-02-29 00:52:33 2024-02-29 21:52:33 +jump over a year: 2024-01-01 04:01:01 2023-12-31 20:01:01 diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh index 835dab8af57..441fc254256 100755 --- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh +++
b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh @@ -15,4 +15,10 @@ $CLICKHOUSE_CLIENT -q "select x, to_utc_timestamp(toDateTime('2023-03-16 11:22:3 # timestamp convert between DST timezone and UTC $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid')" $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid')" -$CLICKHOUSE_CLIENT -q "drop table test_tbl" \ No newline at end of file +$CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')" +$CLICKHOUSE_CLIENT -q "select 'timezone with half-hour offset:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide')" +$CLICKHOUSE_CLIENT -q "select 'jump over a year:', to_utc_timestamp(toDateTime('2023-12-31 23:01:01'), 'EST'), from_utc_timestamp(toDateTime('2024-01-01 01:01:01'), 'EST')" + +$CLICKHOUSE_CLIENT -q "drop table test_tbl" diff --git a/tests/queries/0_stateless/03224_arrayUnion.reference b/tests/queries/0_stateless/03224_arrayUnion.reference index b900b6cdb0a..9b871234d27 100644 --- a/tests/queries/0_stateless/03224_arrayUnion.reference +++ b/tests/queries/0_stateless/03224_arrayUnion.reference @@ -41,3 +41,13 @@ [1,2,3,4,5,10,20] ------- [1,2,3] +------- +[10,-2,1] ['hello','hi'] [3,2,1,NULL] +------- +------- +[1] +------- +[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256] +199999 +------- +[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19] diff --git a/tests/queries/0_stateless/03224_arrayUnion.sql b/tests/queries/0_stateless/03224_arrayUnion.sql index dedbacad906..14a9192f39a 100644 --- a/tests/queries/0_stateless/03224_arrayUnion.sql +++ b/tests/queries/0_stateless/03224_arrayUnion.sql @@ -35,4 +35,23 @@ SELECT arraySort(arrayUnion([NULL, NULL, NULL, 1], [1, NULL, NULL], [1, 2, 3, NU select '-------'; SELECT arraySort(arrayUnion([1, 1, 1, 2, 3], [2, 2, 4], [5, 10, 20])); select '-------'; -SELECT arraySort(arrayUnion([1, 2], [1, 3], [])), +SELECT arraySort(arrayUnion([1, 2], [1, 3], [])); +select '-------'; +-- example from docs 
+SELECT + arrayUnion([-2, 1], [10, 1], [-2], []) as num_example, + arrayUnion(['hi'], [], ['hello', 'hi']) as str_example, + arrayUnion([1, 3, NULL], [2, 3, NULL]) as null_example; +select '-------'; +--mix of types +SELECT arrayUnion([1], [-2], [1.1, 'hi'], [NULL, 'hello', []]); -- {serverError NO_COMMON_TYPE} +select '-------'; +SELECT arrayUnion([1]); +SELECT arrayUnion(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select '-------'; +--bigger arrays +SELECT arraySort(arrayUnion(range(1, 256), range(2, 257))); +SELECT length(arrayUnion(range(1, 100000), range(9999, 200000))); +select '-------'; +--bigger number of arguments +SELECT arraySort(arrayUnion([1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9], [1, 10], [1, 11], [1, 12], [1, 13], [1, 14], [1, 15], [1, 16], [1, 17], [1, 18], [1, 19])); diff --git a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference index 23cbe2bfdec..ccb315b8305 100644 --- a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference +++ b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.reference @@ -1,6 +1,6 @@ quantileExactWeightedInterpolated -0 0 0 Decimal(38, 8) --25.5 -8.49999999 -5.1 Decimal(38, 8) +0 0 0 25 2024-02-20 Decimal(38, 8) +-25.5 -8.49999999 -5.1 12.25 2024-01-25 Decimal(38, 8) 0 0 0 10 3.33333333 2 20 6.66666666 4 @@ -10,11 +10,14 @@ quantileExactWeightedInterpolated [-50,-40,-30,-20,-10,0,10,20,30,40,50] [-16.66666666,-13.33333333,-10,-6.66666666,-3.33333333,0,3.33333333,6.66666666,10,13.33333333,16.66666666] [-10,-8,-6,-4,-2,0,2,4,6,8,10] +[0,5,10,15,20,25,30,35,40,45,50] +['2024-01-01','2024-01-11','2024-01-21','2024-01-31','2024-02-10','2024-02-20','2024-03-01','2024-03-11','2024-03-21','2024-03-31','2024-04-10'] quantileExactWeightedInterpolatedState [10000.6,20000.2,29999.8,39999.4] Test with filter that returns no rows -0 0 0 +0 0 0 nan 1970-01-01 +0 0 0 nan 1970-01-01 Test with dynamic weights -21 7 4.2 +21 7 4.2 35.5 2024-03-12 Test with all weights set to 0 -0 0 0 +0 0 0 nan 1970-01-01 diff --git a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql index dba16eae22a..a64b46e751b 100644 --- a/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql +++ b/tests/queries/0_stateless/03240_quantile_exact_weighted_interpolated.sql @@ -5,16 +5,28 @@ CREATE TABLE decimal a Decimal32(4), b Decimal64(8), c Decimal128(8), + f Float64, + d Date, w UInt64 ) ENGINE = Memory; -INSERT INTO decimal (a, b, c, w) -SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5, number +INSERT INTO decimal (a, b, c, f, d, w) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5, number/2, addDays(toDate('2024-01-01'), number), number FROM system.numbers LIMIT 101; SELECT 'quantileExactWeightedInterpolated'; -SELECT medianExactWeightedInterpolated(a, 1), medianExactWeightedInterpolated(b, 2), medianExactWeightedInterpolated(c, 3) as x, toTypeName(x) FROM decimal; -SELECT quantileExactWeightedInterpolated(a, 1), quantileExactWeightedInterpolated(b, 2), quantileExactWeightedInterpolated(c, 3) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3) as x, + 
medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3) as x, + quantileExactWeightedInterpolated(f, 4), + quantileExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal WHERE a < 0; SELECT quantileExactWeightedInterpolated(0.0)(a, 1), quantileExactWeightedInterpolated(0.0)(b, 2), quantileExactWeightedInterpolated(0.0)(c, 3) FROM decimal WHERE a >= 0; SELECT quantileExactWeightedInterpolated(0.2)(a, 1), quantileExactWeightedInterpolated(0.2)(b, 2), quantileExactWeightedInterpolated(0.2)(c, 3) FROM decimal WHERE a >= 0; SELECT quantileExactWeightedInterpolated(0.4)(a, 1), quantileExactWeightedInterpolated(0.4)(b, 2), quantileExactWeightedInterpolated(0.4)(c, 3) FROM decimal WHERE a >= 0; @@ -24,6 +36,8 @@ SELECT quantileExactWeightedInterpolated(1.0)(a, 1), quantileExactWeightedInterp SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(f, 4) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(d, 5) FROM decimal; SELECT 'quantileExactWeightedInterpolatedState'; SELECT quantilesExactWeightedInterpolatedMerge(0.2, 0.4, 0.6, 0.8)(x) @@ -34,12 +48,29 @@ FROM ); SELECT 'Test with filter that returns no rows'; -SELECT medianExactWeightedInterpolated(a, 1), medianExactWeightedInterpolated(b, 2), medianExactWeightedInterpolated(c, 3) FROM decimal WHERE a > 1000; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3), + medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5) FROM decimal WHERE a > 1000; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3), + quantileExactWeightedInterpolated(f, 4), + quantileExactWeightedInterpolated(d, 5) FROM decimal WHERE d < toDate('2024-01-01'); SELECT 'Test with dynamic weights'; -SELECT medianExactWeightedInterpolated(a, w), medianExactWeightedInterpolated(b, w), medianExactWeightedInterpolated(c, w) FROM decimal; +SELECT medianExactWeightedInterpolated(a, w), + medianExactWeightedInterpolated(b, w), + medianExactWeightedInterpolated(c, w), + medianExactWeightedInterpolated(f, w), + medianExactWeightedInterpolated(d, w) FROM decimal; SELECT 'Test with all weights set to 0'; -SELECT medianExactWeightedInterpolated(a, 0), medianExactWeightedInterpolated(b, 0), medianExactWeightedInterpolated(c, 0) FROM decimal; +SELECT medianExactWeightedInterpolated(a, 0), + medianExactWeightedInterpolated(b, 0), + medianExactWeightedInterpolated(c, 0), + medianExactWeightedInterpolated(f, 0), + medianExactWeightedInterpolated(d, 0) FROM decimal; DROP TABLE IF EXISTS decimal; From d7da086a2e474b1938568dbd47f6515344ef397f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 20:50:05 +0100 Subject: [PATCH 074/171] Fix tidy --- src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index 943febf4b0e..2add11d0f6a 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -67,7 +67,7 @@ bool ExecuteScalarSubqueriesMatcher::needChildVisit(ASTPtr & node, const ASTPtr return false; } - if (auto tables = node->as<ASTTablesInSelectQueryElement>()) + if (auto * tables = node->as<ASTTablesInSelectQueryElement>()) { /// Contrary to what's said in the code block above, ARRAY JOIN needs to resolve the subquery if possible /// and assign an alias for 02367_optimize_trivial_count_with_array_join to pass. Otherwise it will fail in From c23dfa343155a7162b1bcf1f98080f5a08b92f7f Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 12:30:37 +0800 Subject: [PATCH 075/171] fix uninitialized orc data --- .../Impl/NativeORCBlockInputFormat.cpp | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 81df330ffb5..5c7637d3e51 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1534,15 +1534,24 @@ static ColumnWithTypeAndName readColumnWithDateData( for (size_t i = 0; i < orc_int_column->numElements; ++i) { - Int32 days_num = static_cast<Int32>(orc_int_column->data[i]); - if (check_date_range && (days_num > DATE_LUT_MAX_EXTEND_DAY_NUM || days_num < -DAYNUM_OFFSET_EPOCH)) - throw Exception( - ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, - "Input value {} of a column \"{}\" exceeds the range of type Date32", - days_num, - column_name); + if (!orc_int_column->hasNulls || orc_int_column->notNull[i]) + { + Int32 days_num = static_cast<Int32>(orc_int_column->data[i]); + if (check_date_range && (days_num > DATE_LUT_MAX_EXTEND_DAY_NUM || days_num < -DAYNUM_OFFSET_EPOCH)) + throw Exception( + ErrorCodes::VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE, + "Input value {} of a column \"{}\" exceeds the range of type Date32", + days_num, + column_name); + + column_data.push_back(days_num); + } + else + { + /// ORC library doesn't gurantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 + column_data.push_back(0); + } - column_data.push_back(days_num); } return {std::move(internal_column), internal_type, column_name}; From 6a8df5ea89724d7686f6c520bc436b7cb80294bd Mon Sep 17 00:00:00 2001 From: nauu Date: Wed, 6 Nov 2024 14:57:14 +0800 Subject: [PATCH 076/171] support the endpoint of oss accelerator
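OSS accelerator endpoints use virtual-hosted URLs of the form https://<bucket>.<region>.oss-data-acc.aliyuncs.com/<key>: the region label sits between the bucket and the service name, so the generic virtual-hosted pattern would otherwise fold the region into the bucket name. The fix keeps only the first host label as the bucket and rebuilds the endpoint from the remainder of the host. A minimal sketch of the intended split (editor's illustration, not the real URI parser; it assumes the host has already been matched as an oss-data-acc virtual-hosted name and therefore contains at least one dot):

    #include <cassert>
    #include <string>

    struct Parsed { std::string bucket; std::string endpoint; };

    // host: everything between "<scheme>://" and the first '/'.
    Parsed splitOssDataAccHost(const std::string & scheme, const std::string & host)
    {
        // Only the first label is the bucket; region + service + domain form the endpoint.
        auto first_dot = host.find('.');
        return {host.substr(0, first_dot), scheme + "://" + host.substr(first_dot + 1)};
    }

    int main()
    {
        auto r = splitOssDataAccHost("https", "bucket-test.cn-beijing-internal.oss-data-acc.aliyuncs.com");
        assert(r.bucket == "bucket-test");
        assert(r.endpoint == "https://cn-beijing-internal.oss-data-acc.aliyuncs.com");
        return 0;
    }

This mirrors the expectations added to gtest_s3_uri.cpp below.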
(https://bucket-name.s3.region.amazonaws.com/key) /// https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#virtual-hosted-style-access - static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3express[\-a-z0-9]+|s3|cos|obs|oss|eos)([.\-][a-z0-9\-.:]+))"); + static const RE2 virtual_hosted_style_pattern(R"((.+)\.(s3express[\-a-z0-9]+|s3|cos|obs|oss-data-acc|oss|eos)([.\-][a-z0-9\-.:]+))"); /// Case when AWS Private Link Interface is being used /// E.g. (bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/bucket-name/key) @@ -115,7 +115,15 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) && re2::RE2::FullMatch(uri.getAuthority(), virtual_hosted_style_pattern, &bucket, &name, &endpoint_authority_from_uri)) { is_virtual_hosted_style = true; - endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri; + if (name == "oss-data-acc") + { + bucket = bucket.substr(0, bucket.find(".")); + endpoint = uri.getScheme() + "://" + uri.getHost().substr(bucket.length() + 1); + } + else + { + endpoint = uri.getScheme() + "://" + name + endpoint_authority_from_uri; + } validateBucket(bucket, uri); if (!uri.getPath().empty()) diff --git a/src/IO/tests/gtest_s3_uri.cpp b/src/IO/tests/gtest_s3_uri.cpp index 8696fab0616..6167313b634 100644 --- a/src/IO/tests/gtest_s3_uri.cpp +++ b/src/IO/tests/gtest_s3_uri.cpp @@ -212,6 +212,22 @@ TEST(S3UriTest, validPatterns) ASSERT_EQ("", uri.version_id); ASSERT_EQ(true, uri.is_virtual_hosted_style); } + { + S3::URI uri("https://bucket-test1.oss-cn-beijing-internal.aliyuncs.com/ab-test"); + ASSERT_EQ("https://oss-cn-beijing-internal.aliyuncs.com", uri.endpoint); + ASSERT_EQ("bucket-test1", uri.bucket); + ASSERT_EQ("ab-test", uri.key); + ASSERT_EQ("", uri.version_id); + ASSERT_EQ(true, uri.is_virtual_hosted_style); + } + { + S3::URI uri("https://bucket-test.cn-beijing-internal.oss-data-acc.aliyuncs.com/ab-test"); + ASSERT_EQ("https://cn-beijing-internal.oss-data-acc.aliyuncs.com", uri.endpoint); + ASSERT_EQ("bucket-test", uri.bucket); + ASSERT_EQ("ab-test", uri.key); + ASSERT_EQ("", uri.version_id); + ASSERT_EQ(true, uri.is_virtual_hosted_style); + } } TEST(S3UriTest, versionIdChecks) From 127f324822e7b45259eb6ec9b9f5168933350aa1 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 15:03:41 +0800 Subject: [PATCH 077/171] add uts --- .../Formats/Impl/NativeORCBlockInputFormat.cpp | 3 +-- .../03259_orc_date_out_of_range.reference | 12 ++++++++++++ .../0_stateless/03259_orc_date_out_of_range.sql | 15 +++++++++++++++ 3 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03259_orc_date_out_of_range.reference create mode 100644 tests/queries/0_stateless/03259_orc_date_out_of_range.sql diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 5c7637d3e51..26aa3555c2b 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -1548,10 +1548,9 @@ static ColumnWithTypeAndName readColumnWithDateData( } else { - /// ORC library doesn't gurantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 + /// ORC library doesn't guarantee that orc_int_column->data[i] is initialized to zero when orc_int_column->notNull[i] is false since https://github.com/ClickHouse/ClickHouse/pull/69473 
column_data.push_back(0); } - } return {std::move(internal_column), internal_type, column_name}; diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.reference b/tests/queries/0_stateless/03259_orc_date_out_of_range.reference new file mode 100644 index 00000000000..ddac785369f --- /dev/null +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.reference @@ -0,0 +1,12 @@ +number Nullable(Int64) +date_field Nullable(Date32) +\N +1970-01-02 +\N +1970-01-04 +\N +1970-01-06 +\N +1970-01-08 +\N +1970-01-10 diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql new file mode 100644 index 00000000000..470c4ff3817 --- /dev/null +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -0,0 +1,15 @@ + +-- Tags: no-parallel + +SET session_timezone = 'UTC'; +SET engine_file_truncate_on_insert = 1; + +insert into function file('03259.orc') +select + number, + if (number % 2 = 0, null, toDate32(number)) as date_field + from numbers(10); + +desc file('03259.orc'); + +select date_field from file('03259.orc') order by number; From ef0be4a01cb4fd9c4723ecf31b96aab7ee6a30ac Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 15:06:00 +0800 Subject: [PATCH 078/171] fix typo --- tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql index 470c4ff3817..409e8ce079d 100644 --- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -1,4 +1,3 @@ - -- Tags: no-parallel SET session_timezone = 'UTC'; From 590029a33bfd844eede8b4ad570464d0cf86c938 Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 16:38:09 +0800 Subject: [PATCH 079/171] fix orc date32 overflow --- tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql index 409e8ce079d..7103b93b147 100644 --- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -3,12 +3,12 @@ SET session_timezone = 'UTC'; SET engine_file_truncate_on_insert = 1; -insert into function file('03259.orc') +insert into function file('03259.orc', 'ORC') select number, if (number % 2 = 0, null, toDate32(number)) as date_field - from numbers(10); +from numbers(10); -desc file('03259.orc'); +desc file('03259.orc', 'ORC'); -select date_field from file('03259.orc') order by number; +select date_field from file('03259.orc', 'ORC') order by number; From 6761fccbf30cba1b18331bab993710e89c047aba Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Wed, 6 Nov 2024 17:10:00 +0800 Subject: [PATCH 080/171] fix orc date32 overflow --- tests/queries/0_stateless/03259_orc_date_out_of_range.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql index 7103b93b147..e73d2faa5dd 100644 --- a/tests/queries/0_stateless/03259_orc_date_out_of_range.sql +++ b/tests/queries/0_stateless/03259_orc_date_out_of_range.sql @@ -1,4 +1,4 @@ --- Tags: no-parallel +-- Tags: no-fasttest, no-parallel SET session_timezone = 
'UTC';
 SET engine_file_truncate_on_insert = 1;

From 068b4fe8cfa184c4aaecda057b78d7b8acfdbb06 Mon Sep 17 00:00:00 2001
From: maxvostrikov 
Date: Wed, 6 Nov 2024 12:16:59 +0100
Subject: [PATCH 081/171] squash! Missing tests in several tests in 24.10

Added corner cases for tests for:
to_utc_timestamp and from_utc_timestamp (more timezones, special timezones,
epoch corner cases do not look right, raising a bug over that)
arrayUnion (empty and big arrays)
quantilesExactWeightedInterpolated (more data types)
---
 tests/queries/0_stateless/02812_from_to_utc_timestamp.reference | 2 ++
 tests/queries/0_stateless/02812_from_to_utc_timestamp.sh        | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
index bdce849e069..fb92bdda821 100644
--- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
+++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.reference
@@ -6,5 +6,7 @@
 2024-10-24 16:22:33 2024-10-24 06:22:33
 leap year: 2024-02-29 16:22:33 2024-02-29 06:22:33
 non-leap year: 2023-03-01 16:22:33 2023-03-01 06:22:33
+leap year: 2024-02-29 04:22:33 2024-02-29 19:22:33
+non-leap year: 2023-03-01 04:22:33 2023-02-28 19:22:33
 timezone with half-hour offset: 2024-02-29 00:52:33 2024-02-29 21:52:33
 jump over a year: 2024-01-01 04:01:01 2023-12-31 20:01:01
diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
index 441fc254256..20ae224332c 100755
--- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
+++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
@@ -18,6 +18,8 @@ $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33')
 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')"
+$CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-03-01 00:22:33'), 'EST')"
+$CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-03-01 00:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'timezone with half-hour offset:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide')"
 $CLICKHOUSE_CLIENT -q "select 'jump over a year:', to_utc_timestamp(toDateTime('2023-12-31 23:01:01'), 'EST'), from_utc_timestamp(toDateTime('2024-01-01 01:01:01'), 'EST')"

From 338af374d88c134b39d75dd1f56f5630cd41fcc2 Mon Sep 17 00:00:00 2001
From: Sema Checherinda 
Date: Wed, 6 Nov 2024 09:52:25 +0100
Subject: [PATCH 082/171] remove the methods removeObject/removeObjects in
 favor of removeObjectIfExists/removeObjectsIfExist

---
 .../AzureBlobStorage/AzureObjectStorage.cpp   | 20 ++++++++---------
 .../AzureBlobStorage/AzureObjectStorage.h     |  4 ++--
 .../Cached/CachedObjectStorage.cpp            | 22 +++++++++----------
 .../Cached/CachedObjectStorage.h              |  4 ++--
 .../DiskObjectStorageTransaction.cpp          |  6 ++---
 .../ObjectStorages/HDFS/HDFSObjectStorage.h   |  4 ++--
src/Disks/ObjectStorages/IObjectStorage.h | 4 ++-- .../ObjectStorages/Local/LocalObjectStorage.h | 4 ++-- .../MetadataStorageFromPlainObjectStorage.cpp | 4 ++-- ...torageFromPlainObjectStorageOperations.cpp | 4 ++-- .../ObjectStorages/S3/S3ObjectStorage.cpp | 16 +++++++------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 4 ++-- .../ObjectStorages/Web/WebObjectStorage.cpp | 16 +++++++------- .../ObjectStorages/Web/WebObjectStorage.h | 4 ++-- .../ObjectStorageQueueSource.cpp | 2 +- 15 files changed, 58 insertions(+), 60 deletions(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 673c82806bd..959afa65672 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -278,17 +278,17 @@ void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const Sha } /// Remove file. Throws exception if file doesn't exists or it's a directory. -void AzureObjectStorage::removeObject(const StoredObject & object) -{ - removeObjectImpl(object, client.get(), false); -} +// void AzureObjectStorage::removeObject(const StoredObject & object) +// { +// removeObjectImpl(object, client.get(), false); +// } -void AzureObjectStorage::removeObjects(const StoredObjects & objects) -{ - auto client_ptr = client.get(); - for (const auto & object : objects) - removeObjectImpl(object, client_ptr, false); -} +// void AzureObjectStorage::removeObjects(const StoredObjects & objects) +// { +// auto client_ptr = client.get(); +// for (const auto & object : objects) +// removeObjectImpl(object, client_ptr, false); +// } void AzureObjectStorage::removeObjectIfExists(const StoredObject & object) { diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 58225eccd90..433fe7a852e 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -60,9 +60,9 @@ public: const WriteSettings & write_settings = {}) override; /// Remove file. Throws exception if file doesn't exists or it's a directory. 
- void removeObject(const StoredObject & object) override; + //void removeObject(const StoredObject & object) override; - void removeObjects(const StoredObjects & objects) override; + //void removeObjects(const StoredObjects & objects) override; void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index 163ff3a9c68..f2750e6814f 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -148,19 +148,19 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } -void CachedObjectStorage::removeObject(const StoredObject & object) -{ - removeCacheIfExists(object.remote_path); - object_storage->removeObject(object); -} +// void CachedObjectStorage::removeObject(const StoredObject & object) +// { +// removeCacheIfExists(object.remote_path); +// object_storage->removeObject(object); +// } -void CachedObjectStorage::removeObjects(const StoredObjects & objects) -{ - for (const auto & object : objects) - removeCacheIfExists(object.remote_path); +// void CachedObjectStorage::removeObjects(const StoredObjects & objects) +// { +// for (const auto & object : objects) +// removeCacheIfExists(object.remote_path); - object_storage->removeObjects(objects); -} +// object_storage->removeObjects(objects); +// } void CachedObjectStorage::removeObjectIfExists(const StoredObject & object) { diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index b77baf21e40..7e10057e04c 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -45,9 +45,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object) override; + // void removeObject(const StoredObject & object) override; - void removeObjects(const StoredObjects & objects) override; + // void removeObjects(const StoredObjects & objects) override; void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp index 64323fb6f3c..19de2bb78af 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageTransaction.cpp @@ -480,8 +480,7 @@ struct WriteFileObjectStorageOperation final : public IDiskObjectStorageOperatio void undo() override { - if (object_storage.exists(object)) - object_storage.removeObject(object); + object_storage.removeObjectIfExists(object); } void finalize() override @@ -543,8 +542,7 @@ struct CopyFileObjectStorageOperation final : public IDiskObjectStorageOperation void undo() override { - for (const auto & object : created_objects) - destination_object_storage.removeObject(object); + destination_object_storage.removeObjectsIfExist(created_objects); } void finalize() override diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index b53161beb76..317399b4753 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -78,9 +78,9 @@ public: const WriteSettings & 
write_settings = {}) override; /// Remove file. Throws exception if file doesn't exists or it's a directory. - void removeObject(const StoredObject & object) override; + void removeObject(const StoredObject & object); - void removeObjects(const StoredObjects & objects) override; + void removeObjects(const StoredObjects & objects); void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/IObjectStorage.h b/src/Disks/ObjectStorages/IObjectStorage.h index 8dde96b8b16..adb36762539 100644 --- a/src/Disks/ObjectStorages/IObjectStorage.h +++ b/src/Disks/ObjectStorages/IObjectStorage.h @@ -161,11 +161,11 @@ public: virtual bool isRemote() const = 0; /// Remove object. Throws exception if object doesn't exists. - virtual void removeObject(const StoredObject & object) = 0; + // virtual void removeObject(const StoredObject & object) = 0; /// Remove multiple objects. Some object storages can do batch remove in a more /// optimal way. - virtual void removeObjects(const StoredObjects & objects) = 0; + // virtual void removeObjects(const StoredObjects & objects) = 0; /// Remove object on path if exists virtual void removeObjectIfExists(const StoredObject & object) = 0; diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index f1a0391a984..ffc151bda04 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -42,9 +42,9 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object) override; + void removeObject(const StoredObject & object); - void removeObjects(const StoredObjects & objects) override; + void removeObjects(const StoredObjects & objects); void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp index d56c5d9143c..27aa9304de7 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorage.cpp @@ -203,7 +203,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::unlinkFile(const std::str { auto object_key = metadata_storage.object_storage->generateObjectKeyForPath(path, std::nullopt /* key_prefix */); auto object = StoredObject(object_key.serialize()); - metadata_storage.object_storage->removeObject(object); + metadata_storage.object_storage->removeObjectIfExists(object); } void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std::string & path) @@ -211,7 +211,7 @@ void MetadataStorageFromPlainObjectStorageTransaction::removeDirectory(const std if (metadata_storage.object_storage->isWriteOnce()) { for (auto it = metadata_storage.iterateDirectory(path); it->isValid(); it->next()) - metadata_storage.object_storage->removeObject(StoredObject(it->path())); + metadata_storage.object_storage->removeObjectIfExists(StoredObject(it->path())); } else { diff --git a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp index ea57d691908..62015631aa5 100644 --- a/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp +++ b/src/Disks/ObjectStorages/MetadataStorageFromPlainObjectStorageOperations.cpp @@ -107,7 +107,7 @@ void 
MetadataStorageFromPlainObjectStorageCreateDirectoryOperation::undo(std::un auto metric = object_storage->getMetadataStorageMetrics().directory_map_size; CurrentMetrics::sub(metric, 1); - object_storage->removeObject(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); + object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); } else if (write_created) object_storage->removeObjectIfExists(StoredObject(metadata_object_key.serialize(), path / PREFIX_PATH_FILE_NAME)); @@ -247,7 +247,7 @@ void MetadataStorageFromPlainObjectStorageRemoveDirectoryOperation::execute(std: auto metadata_object_key = createMetadataObjectKey(key_prefix, metadata_key_prefix); auto metadata_object = StoredObject(/*remote_path*/ metadata_object_key.serialize(), /*local_path*/ path / PREFIX_PATH_FILE_NAME); - object_storage->removeObject(metadata_object); + object_storage->removeObjectIfExists(metadata_object); { std::lock_guard lock(path_map.mutex); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 47ef97401f2..7ed118c6b07 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -326,20 +326,20 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e ProfileEvents::DiskS3DeleteObjects); } -void S3ObjectStorage::removeObject(const StoredObject & object) -{ - removeObjectImpl(object, false); -} +// void S3ObjectStorage::removeObject(const StoredObject & object) +// { +// removeObjectImpl(object, false); +// } void S3ObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, true); } -void S3ObjectStorage::removeObjects(const StoredObjects & objects) -{ - removeObjectsImpl(objects, false); -} +// void S3ObjectStorage::removeObjects(const StoredObjects & objects) +// { +// removeObjectsImpl(objects, false); +// } void S3ObjectStorage::removeObjectsIfExist(const StoredObjects & objects) { diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index d6e84cf57ef..a2aeaf8a43c 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -102,11 +102,11 @@ public: ObjectStorageIteratorPtr iterate(const std::string & path_prefix, size_t max_keys) const override; /// Uses `DeleteObjectRequest`. - void removeObject(const StoredObject & object) override; + //void removeObject(const StoredObject & object) override; /// Uses `DeleteObjectsRequest` if it is allowed by `s3_capabilities`, otherwise `DeleteObjectRequest`. /// `DeleteObjectsRequest` is not supported on GCS, see https://issuetracker.google.com/issues/162653700 . - void removeObjects(const StoredObjects & objects) override; + //void removeObjects(const StoredObjects & objects) override; /// Uses `DeleteObjectRequest`. 
     void removeObjectIfExists(const StoredObject & object) override;

diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
index 871d3b506f6..1503d5819eb 100644
--- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
+++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp
@@ -254,15 +254,15 @@ std::unique_ptr WebObjectStorage::writeObject( /// NOLI
     throwNotAllowed();
 }
 
-void WebObjectStorage::removeObject(const StoredObject &)
-{
-    throwNotAllowed();
-}
+// void WebObjectStorage::removeObject(const StoredObject &)
+// {
+//     throwNotAllowed();
+// }
 
-void WebObjectStorage::removeObjects(const StoredObjects &)
-{
-    throwNotAllowed();
-}
+// void WebObjectStorage::removeObjects(const StoredObjects &)
+// {
+//     throwNotAllowed();
+// }
 
 void WebObjectStorage::removeObjectIfExists(const StoredObject &)
 {
diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h
index 573221b7e21..ae52cc20f9b 100644
--- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h
+++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h
@@ -47,9 +47,9 @@ public:
         size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
         const WriteSettings & write_settings = {}) override;
 
-    void removeObject(const StoredObject & object) override;
+    // void removeObject(const StoredObject & object) override;
 
-    void removeObjects(const StoredObjects & objects) override;
+    // void removeObjects(const StoredObjects & objects) override;
 
     void removeObjectIfExists(const StoredObject & object) override;
 
diff --git a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp
index ba1a97bc2fb..e702f07208a 100644
--- a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp
+++ b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp
@@ -659,7 +659,7 @@ void ObjectStorageQueueSource::applyActionAfterProcessing(const String & path)
 {
     if (files_metadata->getTableMetadata().after_processing == ObjectStorageQueueAction::DELETE)
     {
-        object_storage->removeObject(StoredObject(path));
+        object_storage->removeObjectIfExists(StoredObject(path));
     }
 }
 
From 020b69647a65dd740cddfbf62730f37de14a4eb8 Mon Sep 17 00:00:00 2001
From: avogar 
Date: Wed, 6 Nov 2024 15:15:29 +0000
Subject: [PATCH 083/171] Fix counting column size in wide part for Dynamic and JSON types

---
 .../MergeTree/MergeTreeDataPartWide.cpp       |  2 +-
 .../MergeTree/MergeTreeReaderWide.cpp         |  2 +-
 ...umn_sizes_with_dynamic_structure.reference |  1 +
 ...62_column_sizes_with_dynamic_structure.sql | 22 +++++++++++++++++++
 4 files changed, 25 insertions(+), 2 deletions(-)
 create mode 100644 tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference
 create mode 100644 tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql

diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
index d6f213463f2..d8470ba8405 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp
@@ -108,7 +108,7 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl(
         auto mrk_checksum = checksums.files.find(*stream_name + getMarksFileExtension());
         if (mrk_checksum != checksums.files.end())
             size.marks += mrk_checksum->second.file_size;
-    });
+    }, column.type, getColumnSample(column));
 
     return size;
 }
diff --git a/src/Storages/MergeTree/MergeTreeReaderWide.cpp b/src/Storages/MergeTree/MergeTreeReaderWide.cpp
index
77231d8d392..885bd1ded8c 100644 --- a/src/Storages/MergeTree/MergeTreeReaderWide.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderWide.cpp @@ -172,7 +172,7 @@ size_t MergeTreeReaderWide::readRows( throw; } - if (column->empty()) + if (column->empty() && max_rows_to_read > 0) res_columns[pos] = nullptr; } diff --git a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference new file mode 100644 index 00000000000..5cab16ed96d --- /dev/null +++ b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.reference @@ -0,0 +1 @@ +test 10.00 million 352.87 MiB 39.43 MiB 39.45 MiB diff --git a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql new file mode 100644 index 00000000000..21e6515fc99 --- /dev/null +++ b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql @@ -0,0 +1,22 @@ +-- Tags: no-random-settings + +set allow_experimental_dynamic_type = 1; +set allow_experimental_json_type = 1; + +drop table if exists test; +create table test (d Dynamic, json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=1; +insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(10000000); + +SELECT + `table`, + formatReadableQuantity(sum(rows)) AS rows, + formatReadableSize(sum(data_uncompressed_bytes)) AS data_size_uncompressed, + formatReadableSize(sum(data_compressed_bytes)) AS data_size_compressed, + formatReadableSize(sum(bytes_on_disk)) AS total_size_on_disk +FROM system.parts +WHERE active AND (database = currentDatabase()) AND (`table` = 'test') +GROUP BY `table` +ORDER BY `table` ASC; + +drop table test; + From df632b6f1e4d825644138c77c2ae4a25943a7fe8 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Wed, 6 Nov 2024 16:44:52 +0100 Subject: [PATCH 084/171] clean up --- .../AzureBlobStorage/AzureObjectStorage.cpp | 13 ------------- .../AzureBlobStorage/AzureObjectStorage.h | 5 ----- .../ObjectStorages/Cached/CachedObjectStorage.cpp | 14 -------------- .../ObjectStorages/Cached/CachedObjectStorage.h | 4 ---- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 10 ---------- src/Disks/ObjectStorages/S3/S3ObjectStorage.h | 7 ------- src/Disks/ObjectStorages/Web/WebObjectStorage.cpp | 10 ---------- src/Disks/ObjectStorages/Web/WebObjectStorage.h | 4 ---- 8 files changed, 67 deletions(-) diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp index 959afa65672..b8386bcf967 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.cpp @@ -277,19 +277,6 @@ void AzureObjectStorage::removeObjectImpl(const StoredObject & object, const Sha } } -/// Remove file. Throws exception if file doesn't exists or it's a directory. 
-// void AzureObjectStorage::removeObject(const StoredObject & object) -// { -// removeObjectImpl(object, client.get(), false); -// } - -// void AzureObjectStorage::removeObjects(const StoredObjects & objects) -// { -// auto client_ptr = client.get(); -// for (const auto & object : objects) -// removeObjectImpl(object, client_ptr, false); -// } - void AzureObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, client.get(), true); diff --git a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h index 433fe7a852e..401493be367 100644 --- a/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h +++ b/src/Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h @@ -59,11 +59,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - /// Remove file. Throws exception if file doesn't exists or it's a directory. - //void removeObject(const StoredObject & object) override; - - //void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp index f2750e6814f..779b8830fab 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.cpp @@ -148,20 +148,6 @@ void CachedObjectStorage::removeCacheIfExists(const std::string & path_key_for_c cache->removeKeyIfExists(getCacheKey(path_key_for_cache), FileCache::getCommonUser().user_id); } -// void CachedObjectStorage::removeObject(const StoredObject & object) -// { -// removeCacheIfExists(object.remote_path); -// object_storage->removeObject(object); -// } - -// void CachedObjectStorage::removeObjects(const StoredObjects & objects) -// { -// for (const auto & object : objects) -// removeCacheIfExists(object.remote_path); - -// object_storage->removeObjects(objects); -// } - void CachedObjectStorage::removeObjectIfExists(const StoredObject & object) { removeCacheIfExists(object.remote_path); diff --git a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h index 7e10057e04c..77aa635b89b 100644 --- a/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h +++ b/src/Disks/ObjectStorages/Cached/CachedObjectStorage.h @@ -45,10 +45,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - // void removeObject(const StoredObject & object) override; - - // void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 7ed118c6b07..9fca3cad688 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -326,21 +326,11 @@ void S3ObjectStorage::removeObjectsImpl(const StoredObjects & objects, bool if_e ProfileEvents::DiskS3DeleteObjects); } -// void S3ObjectStorage::removeObject(const StoredObject & object) -// { -// removeObjectImpl(object, false); -// } - void S3ObjectStorage::removeObjectIfExists(const StoredObject & object) { removeObjectImpl(object, true); } -// 
void S3ObjectStorage::removeObjects(const StoredObjects & objects) -// { -// removeObjectsImpl(objects, false); -// } - void S3ObjectStorage::removeObjectsIfExist(const StoredObjects & objects) { removeObjectsImpl(objects, true); diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h index a2aeaf8a43c..4b9c968ede9 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.h +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.h @@ -101,13 +101,6 @@ public: ObjectStorageIteratorPtr iterate(const std::string & path_prefix, size_t max_keys) const override; - /// Uses `DeleteObjectRequest`. - //void removeObject(const StoredObject & object) override; - - /// Uses `DeleteObjectsRequest` if it is allowed by `s3_capabilities`, otherwise `DeleteObjectRequest`. - /// `DeleteObjectsRequest` is not supported on GCS, see https://issuetracker.google.com/issues/162653700 . - //void removeObjects(const StoredObjects & objects) override; - /// Uses `DeleteObjectRequest`. void removeObjectIfExists(const StoredObject & object) override; diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp index 1503d5819eb..35abc0ed0df 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.cpp @@ -254,16 +254,6 @@ std::unique_ptr WebObjectStorage::writeObject( /// NOLI throwNotAllowed(); } -// void WebObjectStorage::removeObject(const StoredObject &) -// { -// throwNotAllowed(); -// } - -// void WebObjectStorage::removeObjects(const StoredObjects &) -// { -// throwNotAllowed(); -// } - void WebObjectStorage::removeObjectIfExists(const StoredObject &) { throwNotAllowed(); diff --git a/src/Disks/ObjectStorages/Web/WebObjectStorage.h b/src/Disks/ObjectStorages/Web/WebObjectStorage.h index ae52cc20f9b..1e612bd359c 100644 --- a/src/Disks/ObjectStorages/Web/WebObjectStorage.h +++ b/src/Disks/ObjectStorages/Web/WebObjectStorage.h @@ -47,10 +47,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - // void removeObject(const StoredObject & object) override; - - // void removeObjects(const StoredObjects & objects) override; - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; From 4ad8273e5f3d16f5a95220824223800b4a356e26 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 6 Nov 2024 17:31:24 +0000 Subject: [PATCH 085/171] Enable merge filters optimization. 
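
This flips the default of `query_plan_merge_filters` to true: chains of
adjacent FilterSteps in the query plan are merged into a single step, so
later rewrites such as filter push-down see the combined predicate instead
of stopping at the first filter. A rough way to observe the effect (a
sketch, not part of the patch; the exact EXPLAIN output depends on the
build):

    -- With query_plan_merge_filters = 1, both predicates end up in one
    -- FilterStep that can then be pushed down below the subquery / view.
    EXPLAIN PLAN
    SELECT n
    FROM (SELECT number AS n FROM numbers(100) WHERE n > 2)
    WHERE n < 50
    SETTINGS query_plan_merge_filters = 1;

The accompanying SettingsChangesHistory entry records the default change so
the `compatibility` setting can restore the old behavior.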
--- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 + .../QueryPlanOptimizationSettings.h | 2 +- .../03262_filter_push_down_view.reference | 2 ++ .../03262_filter_push_down_view.sql | 36 +++++++++++++++++++ 5 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03262_filter_push_down_view.reference create mode 100644 tests/queries/0_stateless/03262_filter_push_down_view.sql diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..6f8047bbdf8 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4554,7 +4554,7 @@ Possible values: - 0 - Disable - 1 - Enable )", 0) \ - DECLARE(Bool, query_plan_merge_filters, false, R"( + DECLARE(Bool, query_plan_merge_filters, true, R"( Allow to merge filters in the query plan )", 0) \ DECLARE(Bool, query_plan_filter_push_down, true, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ed87fde8b7e..12350b6cdaf 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -74,6 +74,7 @@ static std::initializer_list Date: Tue, 5 Nov 2024 14:02:43 +0000 Subject: [PATCH 086/171] Reduce the general critical section for query_metric_log - Use a separate mutex for each query to reduce the contention period for queries_mutex. - Refactor to use std::mutex instead of std::recursive_mutex for queries_mutex. - In case we're running late to schedule the next task, schedule it immediately. - Fix LockGuard because unlocking twice is undefined behavior. --- base/base/defines.h | 1 + src/Common/LockGuard.h | 32 +++++- src/Interpreters/QueryMetricLog.cpp | 165 +++++++++++++++++++--------- src/Interpreters/QueryMetricLog.h | 43 ++++++-- 4 files changed, 179 insertions(+), 62 deletions(-) diff --git a/base/base/defines.h b/base/base/defines.h index 5685a6d9833..a0c3c0d1de5 100644 --- a/base/base/defines.h +++ b/base/base/defines.h @@ -145,6 +145,7 @@ #define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure #define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability #define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability +#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions /// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function) /// They use a lambda function to apply function attribute to a single statement. This enable us to suppress warnings locally instead of diff --git a/src/Common/LockGuard.h b/src/Common/LockGuard.h index 8a98c5f553a..03c8a3e7617 100644 --- a/src/Common/LockGuard.h +++ b/src/Common/LockGuard.h @@ -1,23 +1,47 @@ #pragma once -#include #include +#include +#include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + /** LockGuard provides RAII-style locking mechanism for a mutex. 
- ** It's intended to be used like std::unique_ptr but with TSA annotations + ** It's intended to be used like std::unique_lock but with TSA annotations */ template class TSA_SCOPED_LOCKABLE LockGuard { public: - explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { mutex.lock(); } - ~LockGuard() TSA_RELEASE() { mutex.unlock(); } + explicit LockGuard(Mutex & mutex_) TSA_ACQUIRE(mutex_) : mutex(mutex_) { lock(); } + ~LockGuard() TSA_RELEASE() { if (locked) unlock(); } + + void lock() TSA_ACQUIRE() + { + /// Don't allow recursive_mutex for now. + if (locked) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't lock twice the same mutex"); + mutex.lock(); + locked = true; + } + + void unlock() TSA_RELEASE() + { + if (!locked) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't unlock the mutex without locking it first"); + mutex.unlock(); + locked = false; + } private: Mutex & mutex; + bool locked = false; }; template typename TLockGuard, typename Mutex> diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index 5ab3fe590e0..e784c357b29 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include -#include namespace DB @@ -24,6 +24,20 @@ namespace DB static auto logger = getLogger("QueryMetricLog"); +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + +String timePointToString(QueryMetricLog::TimePoint time) +{ + /// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves. + auto seconds = std::chrono::time_point_cast(time); + auto microseconds = std::chrono::duration_cast(time - seconds).count(); + + return fmt::format("{:%Y.%m.%d %H:%M:%S}.{:06}", seconds, microseconds); +} + ColumnsDescription QueryMetricLogElement::getColumnsDescription() { ColumnsDescription result; @@ -87,36 +101,69 @@ void QueryMetricLog::shutdown() Base::shutdown(); } -void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds) +void QueryMetricLog::collectMetric(const ProcessList & process_list, String query_id) { - QueryMetricLogStatus status; - status.interval_milliseconds = interval_milliseconds; - status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); + auto current_time = std::chrono::system_clock::now(); + const auto query_info = process_list.getQueryInfo(query_id, false, true, false); + if (!query_info) + { + LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); + return; + } + + LockGuard global_lock(queries_mutex); + auto it = queries.find(query_id); + + /// The query might have finished while the scheduled task is running. + if (it == queries.end()) + { + global_lock.unlock(); + LOG_TRACE(logger, "Query {} not found in the list. 
Finished while this collecting task was running", query_id); + return; + } + + auto & query_status = it->second; + if (!query_status.mutex) + { + global_lock.unlock(); + LOG_TRACE(logger, "Query {} finished while this collecting task was running", query_id); + return; + } + + LockGuard query_lock(query_status.getMutex()); + global_lock.unlock(); + + auto elem = query_status.createLogMetricElement(query_id, *query_info, current_time); + if (elem) + add(std::move(elem.value())); +} + +/// We use TSA_NO_THREAD_SAFETY_ANALYSIS to prevent TSA complaining that we're modifying the query_status fields +/// without locking the mutex. Since we're building it from scratch, there's no harm in not holding it. +/// If we locked it to make TSA happy, TSAN build would falsely complain about +/// lock-order-inversion (potential deadlock) +/// which is not a real issue since QueryMetricLogStatus's mutex cannot be locked by anything else +/// until we add it to the queries map. +void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds) TSA_NO_THREAD_SAFETY_ANALYSIS +{ + QueryMetricLogStatus query_status; + query_status.interval_milliseconds = interval_milliseconds; + query_status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); auto context = getContext(); const auto & process_list = context->getProcessList(); - status.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { - auto current_time = std::chrono::system_clock::now(); - const auto query_info = process_list.getQueryInfo(query_id, false, true, false); - if (!query_info) - { - LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); - return; - } - - auto elem = createLogMetricElement(query_id, *query_info, current_time); - if (elem) - add(std::move(elem.value())); + query_status.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { + collectMetric(process_list, query_id); }); - std::lock_guard lock(queries_mutex); - status.task->scheduleAfter(interval_milliseconds); - queries.emplace(query_id, std::move(status)); + LockGuard global_lock(queries_mutex); + query_status.scheduleNext(query_id); + queries.emplace(query_id, std::move(query_status)); } void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info) { - std::unique_lock lock(queries_mutex); + LockGuard global_lock(queries_mutex); auto it = queries.find(query_id); /// finishQuery may be called from logExceptionBeforeStart when the query has not even started @@ -124,9 +171,19 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, if (it == queries.end()) return; + auto & query_status = it->second; + decltype(query_status.mutex) query_mutex; + LockGuard query_lock(query_status.getMutex()); + + /// Move the query mutex here so that we hold it until the end, after removing the query from queries. + query_mutex = std::move(query_status.mutex); + query_status.mutex = {}; + + global_lock.unlock(); + if (query_info) { - auto elem = createLogMetricElement(query_id, *query_info, finish_time, false); + auto elem = query_status.createLogMetricElement(query_id, *query_info, finish_time, false); if (elem) add(std::move(elem.value())); } @@ -139,51 +196,62 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, /// that order. 
{ /// Take ownership of the task so that we can destroy it in this scope after unlocking `queries_mutex`. - auto task = std::move(it->second.task); + auto task = std::move(query_status.task); /// Build an empty task for the old task to make sure it does not lock any mutex on its destruction. - it->second.task = {}; + query_status.task = {}; + query_lock.unlock(); + global_lock.lock(); queries.erase(query_id); /// Ensure `queries_mutex` is unlocked before calling task's destructor at the end of this /// scope which will lock `exec_mutex`. - lock.unlock(); + global_lock.unlock(); } } -std::optional QueryMetricLog::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) +void QueryMetricLogStatus::scheduleNext(String query_id) { - /// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves. - auto seconds = std::chrono::time_point_cast(query_info_time); - auto microseconds = std::chrono::duration_cast(query_info_time - seconds).count(); - LOG_DEBUG(logger, "Collecting query_metric_log for query {} with QueryStatusInfo from {:%Y.%m.%d %H:%M:%S}.{:06}. Schedule next: {}", query_id, seconds, microseconds, schedule_next); - - std::unique_lock lock(queries_mutex); - auto query_status_it = queries.find(query_id); - - /// The query might have finished while the scheduled task is running. - if (query_status_it == queries.end()) + const auto now = std::chrono::system_clock::now(); + if (next_collect_time > now) { - lock.unlock(); - LOG_TRACE(logger, "Query {} finished already while this collecting task was running", query_id); - return {}; + const auto wait_time = std::chrono::duration_cast(next_collect_time - now).count(); + task->scheduleAfter(wait_time); } - - auto & query_status = query_status_it->second; - if (query_info_time <= query_status.last_collect_time) + else + { + LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); + task->schedule(); + } +} + +std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) +{ + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Schedule next: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next); + + if (query_info_time <= last_collect_time) { - lock.unlock(); LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } - query_status.last_collect_time = query_info_time; + /// Leave some margin because task->scheduleAfter takes a value in milliseconds. + /// So, we can expect up to 1ms of drift since BackgroundSchedulePool will compare + /// time points in milliseconds. 
+ static auto error_margin = std::chrono::milliseconds(1); + if (schedule_next && query_info_time + error_margin < next_collect_time) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Task to collect metric for query {} scheduled at {} but run at {}", + query_id, timePointToString(next_collect_time), timePointToString(query_info_time)); + + last_collect_time = query_info_time; QueryMetricLogElement elem; elem.event_time = timeInSeconds(query_info_time); elem.event_time_microseconds = timeInMicroseconds(query_info_time); - elem.query_id = query_status_it->first; + elem.query_id = query_id; elem.memory_usage = query_info.memory_usage > 0 ? query_info.memory_usage : 0; elem.peak_memory_usage = query_info.peak_memory_usage > 0 ? query_info.peak_memory_usage : 0; @@ -192,7 +260,7 @@ std::optional QueryMetricLog::createLogMetricElement(cons for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i) { const auto & new_value = (*(query_info.profile_counters))[i]; - auto & old_value = query_status.last_profile_events[i]; + auto & old_value = last_profile_events[i]; /// Profile event counters are supposed to be monotonic. However, at least the `NetworkReceiveBytes` can be inaccurate. /// So, since in the future the counter should always have a bigger value than in the past, we skip this event. @@ -214,9 +282,8 @@ std::optional QueryMetricLog::createLogMetricElement(cons if (schedule_next) { - query_status.next_collect_time += std::chrono::milliseconds(query_status.interval_milliseconds); - const auto wait_time = std::chrono::duration_cast(query_status.next_collect_time - std::chrono::system_clock::now()).count(); - query_status.task->scheduleAfter(wait_time); + next_collect_time += std::chrono::milliseconds(interval_milliseconds); + scheduleNext(query_id); } return elem; diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 802cee7bf26..65764229b0a 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -1,5 +1,6 @@ #pragma once +#include #include #include #include @@ -11,11 +12,17 @@ #include #include +#include namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +}; + /** QueryMetricLogElement is a log of query metric values measured at regular time interval. */ @@ -36,31 +43,49 @@ struct QueryMetricLogElement struct QueryMetricLogStatus { + using TimePoint = std::chrono::system_clock::time_point; + using Mutex = std::mutex; + UInt64 interval_milliseconds; - std::chrono::system_clock::time_point last_collect_time; - std::chrono::system_clock::time_point next_collect_time; - std::vector last_profile_events = std::vector(ProfileEvents::end()); - BackgroundSchedulePool::TaskHolder task; + std::chrono::system_clock::time_point last_collect_time TSA_GUARDED_BY(getMutex()); + std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); + std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); + BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); + + /// We need to be able to move it for the hash map, so we need to add an indirection here. + std::unique_ptr mutex = std::make_unique(); + + /// Return a reference to the mutex, used for Thread Sanitizer annotations. 
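+    /// Note that finishQuery() moves the mutex out of the map entry, so calling this for a finished query throws LOGICAL_ERROR.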
+ Mutex & getMutex() const TSA_RETURN_CAPABILITY(mutex) + { + if (!mutex) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mutex cannot be NULL"); + return *mutex; + } + + void scheduleNext(String query_id) TSA_REQUIRES(getMutex()); + std::optional createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next = true) TSA_REQUIRES(getMutex()); }; class QueryMetricLog : public SystemLog { using SystemLog::SystemLog; - using TimePoint = std::chrono::system_clock::time_point; using Base = SystemLog; public: + using TimePoint = std::chrono::system_clock::time_point; + void shutdown() final; - // Both startQuery and finishQuery are called from the thread that executes the query + /// Both startQuery and finishQuery are called from the thread that executes the query. void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: - std::optional createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next = true); + void collectMetric(const ProcessList & process_list, String query_id); - std::recursive_mutex queries_mutex; - std::unordered_map queries; + std::mutex queries_mutex; + std::unordered_map queries TSA_GUARDED_BY(queries_mutex); }; } From 552b0fc8d0f106db1a85805ab883debe7e491e9c Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 13:11:33 +0100 Subject: [PATCH 087/171] Rename a setting --- src/Core/Settings.cpp | 3 ++- src/IO/ReadSettings.h | 2 +- src/Interpreters/Cache/QueryLimit.cpp | 2 +- src/Interpreters/Context.cpp | 4 ++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index c2ffc2ddf0e..d9668849fd2 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4852,7 +4852,7 @@ Allows to record the filesystem caching log for each query DECLARE(Bool, read_from_filesystem_cache_if_exists_otherwise_bypass_cache, false, R"( Allow to use the filesystem cache in passive mode - benefit from the existing cache entries, but don't put more entries into the cache. If you set this setting for heavy ad-hoc queries and leave it disabled for short real-time queries, this will allows to avoid cache threshing by too heavy queries and to improve the overall system efficiency. 
)", 0) \ - DECLARE(Bool, skip_download_if_exceeds_query_cache, true, R"( + DECLARE(Bool, filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit, true, R"( Skip download from remote filesystem if exceeds query cache size )", 0) \ DECLARE(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), R"( @@ -5887,6 +5887,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, Bool, use_mysql_types_in_show_columns, false) \ MAKE_OBSOLETE(M, Bool, s3queue_allow_experimental_sharded_mode, false) \ MAKE_OBSOLETE(M, LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW) \ + MAKE_OBSOLETE(M, Bool, skip_download_if_exceeds_query_cache, true) \ /* moved to config.xml: see also src/Core/ServerSettings.h */ \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \ diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 6ed02212095..103ce7df54b 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -68,7 +68,7 @@ struct ReadSettings std::shared_ptr page_cache; size_t filesystem_cache_max_download_size = (128UL * 1024 * 1024 * 1024); - bool skip_download_if_exceeds_query_cache = true; + bool filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit = true; size_t remote_read_min_bytes_for_seek = DBMS_DEFAULT_BUFFER_SIZE; diff --git a/src/Interpreters/Cache/QueryLimit.cpp b/src/Interpreters/Cache/QueryLimit.cpp index b18d23a5b7f..a7c964022a5 100644 --- a/src/Interpreters/Cache/QueryLimit.cpp +++ b/src/Interpreters/Cache/QueryLimit.cpp @@ -53,7 +53,7 @@ FileCacheQueryLimit::QueryContextPtr FileCacheQueryLimit::getOrSetQueryContext( { it->second = std::make_shared( settings.filesystem_cache_max_download_size, - !settings.skip_download_if_exceeds_query_cache); + !settings.filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit); } return it->second; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index c1fa2c8549a..7b7cdfa2104 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -236,7 +236,7 @@ namespace Setting extern const SettingsUInt64 remote_fs_read_backoff_max_tries; extern const SettingsUInt64 remote_read_min_bytes_for_seek; extern const SettingsBool throw_on_error_from_cache_on_write_operations; - extern const SettingsBool skip_download_if_exceeds_query_cache; + extern const SettingsBool filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit; extern const SettingsBool s3_allow_parallel_part_upload; extern const SettingsBool use_page_cache_for_disks_without_file_cache; extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; @@ -5753,7 +5753,7 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; - res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; + res.filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit = settings_ref[Setting::filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit]; res.page_cache = getPageCache(); res.use_page_cache_for_disks_without_file_cache = 
settings_ref[Setting::use_page_cache_for_disks_without_file_cache]; From ca23e5254c2cca5e6b3f4a9c7ccd65f70be42fc4 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 7 Nov 2024 12:44:57 +0000 Subject: [PATCH 088/171] Fix for tmp parts --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..fb934a77512 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2501,7 +2501,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co { const size_t total_mark = getMarksCount(); /// If column doesn't have dynamic subcolumns or part has no data, just create column using it's type. - if (!column.type->hasDynamicSubcolumns() || !total_mark) + if (is_temp || !column.type->hasDynamicSubcolumns() || !total_mark) return column.type->createColumn(); /// Otherwise, read sample column with 0 rows from the part, so it will load dynamic structure. @@ -2510,22 +2510,24 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co StorageMetadataPtr metadata_ptr = storage.getInMemoryMetadataPtr(); StorageSnapshotPtr storage_snapshot_ptr = std::make_shared(storage, metadata_ptr); + MergeTreeReaderSettings settings; + settings.can_read_part_without_marks = true; MergeTreeReaderPtr reader = getReader( cols, storage_snapshot_ptr, - MarkRanges{MarkRange(0, 1)}, + MarkRanges{MarkRange(0, total_mark)}, /*virtual_fields=*/ {}, /*uncompressed_cache=*/{}, storage.getContext()->getMarkCache().get(), std::make_shared(), - MergeTreeReaderSettings{}, + settings, ValueSizeMap{}, ReadBufferFromFileBase::ProfileCallback{}); Columns result; result.resize(1); - reader->readRows(0, 1, false, 0, result); + reader->readRows(0, total_mark, false, 0, result); return result[0]; } From 07b480c1e4e1f1fd647c4c9cf7d00e29b5619868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 14:44:31 +0100 Subject: [PATCH 089/171] Implicitly treat a file argument as --queries-file --- programs/main.cpp | 40 +++++++++---------- src/Client/ClientBaseOptimizedParts.cpp | 8 +++- ...al_arguments_implicit_query_file.reference | 11 +++++ ...ositional_arguments_implicit_query_file.sh | 34 ++++++++++++++++ 4 files changed, 71 insertions(+), 22 deletions(-) create mode 100644 tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference create mode 100755 tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh diff --git a/programs/main.cpp b/programs/main.cpp index 02ea1471108..ea8fbc1aece 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -1,27 +1,22 @@ -#include -#include +#include +#include +#include +#include -#include -#include -#include -#include -#include -#include /// pair - -#include +#if defined(SANITIZE_COVERAGE) +# include +#endif #include "config.h" #include "config_tools.h" -#include -#include -#include -#include -#include - -#include -#include - +#include +#include +#include +#include +#include +#include /// pair +#include /// Universal executable for various clickhouse applications int mainEntryClickHouseServer(int argc, char ** argv); @@ -238,9 +233,12 @@ int main(int argc_, char ** argv_) /// clickhouse # spawn local /// clickhouse local # spawn local /// clickhouse "select ..." 
# spawn local + /// clickhouse /tmp/repro --enable-analyzer /// - if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-' - || std::string_view(argv[1]).contains(' '))) + std::error_code ec; + if (main_func == printHelp && !argv.empty() + && (argv.size() == 1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ') + || std::filesystem::exists(std::filesystem::path{argv[1]}, ec))) { main_func = mainEntryClickHouseLocal; } diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index ac4d3417779..bc362288079 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -1,5 +1,7 @@ #include +#include + namespace DB { @@ -107,6 +109,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de && !op.original_tokens[0].empty() && !op.value.empty()) { /// Two special cases for better usability: + /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" /// These are relevant for interactive usage - user-friendly, but questionable in general. /// In case of ambiguity or for scripts, prefer using proper options. @@ -115,7 +118,10 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de po::variable_value value(boost::any(op.value), false); const char * option; - if (token.contains(' ')) + std::error_code ec; + if (std::filesystem::exists(std::filesystem::path{token}, ec)) + option = "queries-file"; + else if (token.contains(' ')) option = "query"; else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference new file mode 100644 index 00000000000..fe2432a063f --- /dev/null +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.reference @@ -0,0 +1,11 @@ +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +Hello from a file +max_local_read_bandwidth 1 100 +max_local_read_bandwidth 1 200 diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh new file mode 100755 index 00000000000..14b6e735a9a --- /dev/null +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh @@ -0,0 +1,34 @@ +# Tags: no-random-settings + +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +FILE=${CLICKHOUSE_TMP}/${CLICKHOUSE_DATABASE}_without_extension +echo "SELECT 'Hello from a file'" > ${FILE} + +# Queries can be read from a file. +${CLICKHOUSE_BINARY} --queries-file ${FILE} + +# Or from stdin. +${CLICKHOUSE_BINARY} < ${FILE} + +# Also the positional argument can be interpreted as a file. 
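+# A positional token containing a space is implicitly treated as --query, and
+# a token naming an existing file as --queries-file; any other bare positional
+# token is rejected (see ClientApplicationBase::parseAndCheckOptions above).
+# For example, assuming a file repro.sql exists:
+#   clickhouse repro.sql     # behaves like: clickhouse --queries-file repro.sql
+#   clickhouse "SELECT 1"    # behaves like: clickhouse --query "SELECT 1"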
+${CLICKHOUSE_BINARY} ${FILE} + +${CLICKHOUSE_LOCAL} --queries-file ${FILE} +${CLICKHOUSE_LOCAL} < ${FILE} +${CLICKHOUSE_LOCAL} ${FILE} + +${CLICKHOUSE_CLIENT} --queries-file ${FILE} +${CLICKHOUSE_CLIENT} < ${FILE} +${CLICKHOUSE_CLIENT} ${FILE} + +# Check that positional arguments work in any place +echo "Select name, changed, value FROM system.settings where name = 'max_local_read_bandwidth'" > ${FILE} +${CLICKHOUSE_BINARY} ${FILE} --max-local-read-bandwidth 100 +${CLICKHOUSE_BINARY} --max-local-read-bandwidth 200 ${FILE} + +rm ${FILE} From 06b580777e6ee8ef95cfa261b0a745ddda2662f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 15:08:05 +0100 Subject: [PATCH 090/171] Style --- .../03267_positional_arguments_implicit_query_file.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh index 14b6e735a9a..791aa3af0db 100755 --- a/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh +++ b/tests/queries/0_stateless/03267_positional_arguments_implicit_query_file.sh @@ -1,6 +1,5 @@ -# Tags: no-random-settings - #!/usr/bin/env bash +# Tags: no-random-settings CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh From e5fc37bc7e6c707cd7ea14bb3c4888f94118a126 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 17:27:51 +0100 Subject: [PATCH 091/171] Add alias --- src/Core/Settings.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index d9668849fd2..328f950da1d 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4854,7 +4854,7 @@ Allow to use the filesystem cache in passive mode - benefit from the existing ca )", 0) \ DECLARE(Bool, filesystem_cache_skip_download_if_exceeds_per_query_cache_write_limit, true, R"( Skip download from remote filesystem if exceeds query cache size -)", 0) \ +)", 0) ALIAS(skip_download_if_exceeds_query_cache) \ DECLARE(UInt64, filesystem_cache_max_download_size, (128UL * 1024 * 1024 * 1024), R"( Max remote filesystem cache size that can be downloaded by a single query )", 0) \ @@ -5887,7 +5887,6 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, Bool, use_mysql_types_in_show_columns, false) \ MAKE_OBSOLETE(M, Bool, s3queue_allow_experimental_sharded_mode, false) \ MAKE_OBSOLETE(M, LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW) \ - MAKE_OBSOLETE(M, Bool, skip_download_if_exceeds_query_cache, true) \ /* moved to config.xml: see also src/Core/ServerSettings.h */ \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_buffer_flush_schedule_pool_size, 16) \ MAKE_DEPRECATED_BY_SERVER_CONFIG(M, UInt64, background_pool_size, 16) \ From 4fb38411c128e3a293c93d6f1d5f9b71c961e8db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 19:06:36 +0100 Subject: [PATCH 092/171] Only accept regular files --- programs/main.cpp | 2 +- src/Client/ClientBaseOptimizedParts.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/programs/main.cpp b/programs/main.cpp index ea8fbc1aece..d15c20867d1 100644 --- a/programs/main.cpp +++ b/programs/main.cpp @@ -238,7 +238,7 @@ int main(int argc_, char ** argv_) std::error_code ec; if (main_func == printHelp && !argv.empty() && (argv.size() == 
1 || argv[1][0] == '-' || std::string_view(argv[1]).contains(' ') - || std::filesystem::exists(std::filesystem::path{argv[1]}, ec))) + || std::filesystem::is_regular_file(std::filesystem::path{argv[1]}, ec))) { main_func = mainEntryClickHouseLocal; } diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index bc362288079..afffe775029 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -119,7 +119,7 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de const char * option; std::error_code ec; - if (std::filesystem::exists(std::filesystem::path{token}, ec)) + if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) option = "queries-file"; else if (token.contains(' ')) option = "query"; From 0ac6ce56bd08e25fc9c22022fec21f3346a753c5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 7 Nov 2024 18:19:26 +0000 Subject: [PATCH 093/171] Trying to fix short-circuit for FilterStep. --- src/Processors/QueryPlan/FilterStep.cpp | 98 ++++++++++++++++++++++++- 1 file changed, 97 insertions(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 862e03d74f2..64c46332c34 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -5,6 +5,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -24,6 +27,78 @@ static ITransformingStep::Traits getTraits() }; } +static bool isTrivialSubtree(const ActionsDAG::Node * node) +{ + while (node->type == ActionsDAG::ActionType::ALIAS) + node = node->children.at(0); + + return node->type != ActionsDAG::ActionType::FUNCTION && node->type != ActionsDAG::ActionType::ARRAY_JOIN; +} + +struct ActionsAndName +{ + ActionsDAG dag; + std::string name; +}; + +static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::Node * filter_node) +{ + auto name = filter_node->result_name; + auto split_result = dag.split({filter_node}, true); + dag = std::move(split_result.second); + split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_result.split_nodes_mapping[filter_node]); + return ActionsAndName{std::move(split_result.first), std::move(name)}; +} + +static std::optional trySplitSingleAndFilter(ActionsDAG & dag, const std::string & filter_name) +{ + const auto * filter = &dag.findInOutputs(filter_name); + while (filter->type == ActionsDAG::ActionType::ALIAS) + filter = filter->children.at(0); + + if (filter->type != ActionsDAG::ActionType::FUNCTION || filter->function_base->getName() != "and") + return {}; + + const ActionsDAG::Node * condition_to_split = nullptr; + std::stack nodes; + nodes.push(filter); + while (!nodes.empty()) + { + const auto * node = nodes.top(); + nodes.pop(); + + if (node->type == ActionsDAG::ActionType::FUNCTION && node->function_base->getName() == "and") + { + for (const auto * child : node->children | std::ranges::views::reverse) + nodes.push(child); + + continue; + } + + if (isTrivialSubtree(node)) + continue; + + /// Do not split subtree if it's the last non-trivial one. + /// So, split the first found condition only when there is a another one found. 
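+        /// Illustration (hypothetical conjuncts): for and(c1, c2, c3) the walk
+        /// remembers c1, then splits it off once c2 is reached and returns.
+        /// The caller's loop repeats, peeling c2 the same way, until only c3
+        /// remains in the DAG as the last non-trivial conjunct for the final
+        /// filter step (the next call finds a single condition and returns {}).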
+ if (condition_to_split) + return splitSingleAndFilter(dag, condition_to_split); + + condition_to_split = node; + } + + return {}; +} + +std::vector splitAndChainIntoMultipleFilters(ActionsDAG & dag, const std::string & filter_name) +{ + std::vector res; + + while (auto condition = trySplitSingleAndFilter(dag, filter_name)) + res.push_back(std::move(*condition)); + + return res; +} + FilterStep::FilterStep( const Header & input_header_, ActionsDAG actions_dag_, @@ -50,6 +125,17 @@ FilterStep::FilterStep( void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { + auto and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag), settings.getActionsSettings()); + pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) + { + bool on_totals = stream_type == QueryPipelineBuilder::StreamType::Totals; + return std::make_shared(header, expression, and_atom.name, true, on_totals); + }); + } + auto expression = std::make_shared(std::move(actions_dag), settings.getActionsSettings()); pipeline.addSimpleTransform([&](const Block & header, QueryPipelineBuilder::StreamType stream_type) @@ -76,13 +162,23 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ void FilterStep::describeActions(FormatSettings & settings) const { String prefix(settings.offset, settings.indent_char); + + auto cloned_dag = actions_dag.clone(); + auto and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag)); + settings.out << prefix << "AND column: " << and_atom.name; + expression->describeActions(settings.out, prefix); + } + settings.out << prefix << "Filter column: " << filter_column_name; if (remove_filter_column) settings.out << " (removed)"; settings.out << '\n'; - auto expression = std::make_shared(actions_dag.clone()); + auto expression = std::make_shared(std::move(cloned_dag)); expression->describeActions(settings.out, prefix); } From 1157028e35139ead00c7475045795bde966a2e40 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 7 Nov 2024 19:53:30 +0000 Subject: [PATCH 094/171] Fix getting column sample for not finalized part --- src/Columns/ColumnVariant.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 11 ++++++----- src/Storages/MergeTree/IMergeTreeDataPart.h | 6 +++--- src/Storages/MergeTree/IMergeTreeDataPartWriter.h | 2 ++ src/Storages/MergeTree/MergeTreeDataPartCompact.cpp | 2 +- src/Storages/MergeTree/MergeTreeDataPartCompact.h | 2 +- src/Storages/MergeTree/MergeTreeDataPartWide.cpp | 8 ++++---- src/Storages/MergeTree/MergeTreeDataPartWide.h | 4 ++-- .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 2 ++ src/Storages/MergeTree/MergedBlockOutputStream.cpp | 2 +- 10 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 54f0421fc4b..2fa59b8e33c 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const if (hasOnlyNulls()) { if (limit) - return cloneResized(limit); + return cloneResized(limit ? std::min(size(), limit) : size()); /// If no limit, we can just return current immutable column. 
return this->getPtr(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index b631d991e90..f73b52dbafd 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2252,18 +2252,18 @@ void IMergeTreeDataPart::checkConsistencyWithProjections(bool require_part_metad proj_part->checkConsistency(require_part_metadata); } -void IMergeTreeDataPart::calculateColumnsAndSecondaryIndicesSizesOnDisk() +void IMergeTreeDataPart::calculateColumnsAndSecondaryIndicesSizesOnDisk(std::optional columns_sample) { - calculateColumnsSizesOnDisk(); + calculateColumnsSizesOnDisk(columns_sample); calculateSecondaryIndicesSizesOnDisk(); } -void IMergeTreeDataPart::calculateColumnsSizesOnDisk() +void IMergeTreeDataPart::calculateColumnsSizesOnDisk(std::optional columns_sample) { if (getColumns().empty() || checksums.empty()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot calculate columns sizes when columns or checksums are not initialized"); - calculateEachColumnSizes(columns_sizes, total_columns_size); + calculateEachColumnSizes(columns_sizes, total_columns_size, columns_sample); } void IMergeTreeDataPart::calculateSecondaryIndicesSizesOnDisk() @@ -2501,7 +2501,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co { const size_t total_mark = getMarksCount(); /// If column doesn't have dynamic subcolumns or part has no data, just create column using it's type. - if (is_temp || !column.type->hasDynamicSubcolumns() || !total_mark) + if (!column.type->hasDynamicSubcolumns() || !total_mark) return column.type->createColumn(); /// Otherwise, read sample column with 0 rows from the part, so it will load dynamic structure. @@ -2527,6 +2527,7 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co Columns result; result.resize(1); + LOG_DEBUG(getLogger("IMergeTreeDataPart"), "getColumnSample"); reader->readRows(0, total_mark, false, 0, result); return result[0]; } diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index b41a1d840e1..a7051a2491a 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -426,7 +426,7 @@ public: bool shallParticipateInMerges(const StoragePolicyPtr & storage_policy) const; /// Calculate column and secondary indices sizes on disk. - void calculateColumnsAndSecondaryIndicesSizesOnDisk(); + void calculateColumnsAndSecondaryIndicesSizesOnDisk(std::optional columns_sample = std::nullopt); std::optional getRelativePathForPrefix(const String & prefix, bool detached = false, bool broken = false) const; @@ -631,7 +631,7 @@ protected: /// Fill each_columns_size and total_size with sizes from columns files on /// disk using columns and checksums. 
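+    /// If `columns_sample` is provided (e.g. the writer's in-memory block for a
+    /// part that is still being written and cannot be read back from disk), it
+    /// is used instead of reading per-column samples from the part.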
- virtual void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const = 0; + virtual void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const = 0; std::optional getRelativePathForDetachedPart(const String & prefix, bool broken) const; @@ -713,7 +713,7 @@ private: void loadPartitionAndMinMaxIndex(); - void calculateColumnsSizesOnDisk(); + void calculateColumnsSizesOnDisk(std::optional columns_sample = std::nullopt); void calculateSecondaryIndicesSizesOnDisk(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index d1c76505d7c..8923f6a59ca 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -54,6 +54,8 @@ public: const MergeTreeIndexGranularity & getIndexGranularity() const { return index_granularity; } + virtual Block getColumnsSample() const = 0; + protected: SerializationPtr getSerialization(const String & column_name) const; diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp index 14c2da82de1..8856f467b90 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.cpp @@ -80,7 +80,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartCompactWriter( } -void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*each_columns_size*/, ColumnSize & total_size) const +void MergeTreeDataPartCompact::calculateEachColumnSizes(ColumnSizeByName & /*each_columns_size*/, ColumnSize & total_size, std::optional /*columns_sample*/) const { auto bin_checksum = checksums.files.find(DATA_FILE_NAME_WITH_EXTENSION); if (bin_checksum != checksums.files.end()) diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 8e279571578..c394de0d7c1 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -70,7 +70,7 @@ private: void loadIndexGranularity() override; /// Compact parts don't support per column size, only total size - void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const override; + void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const override; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp index b3b6a0dded6..39f96ba06ad 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.cpp @@ -82,7 +82,7 @@ MergeTreeDataPartWriterPtr createMergeTreeDataPartWideWriter( /// Takes into account the fact that several columns can e.g. share their .size substreams. /// When calculating totals these should be counted only once. 
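+/// A column present in `columns_sample` supplies the in-memory sample used to
+/// enumerate its substreams; otherwise the sample is read from the part itself.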
ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( - const NameAndTypePair & column, std::unordered_set * processed_substreams) const + const NameAndTypePair & column, std::unordered_set * processed_substreams, std::optional columns_sample) const { ColumnSize size; if (checksums.empty()) @@ -108,7 +108,7 @@ ColumnSize MergeTreeDataPartWide::getColumnSizeImpl( auto mrk_checksum = checksums.files.find(*stream_name + getMarksFileExtension()); if (mrk_checksum != checksums.files.end()) size.marks += mrk_checksum->second.file_size; - }, column.type, getColumnSample(column)); + }, column.type, columns_sample && columns_sample->has(column.name) ? columns_sample->getByName(column.name).column : getColumnSample(column)); return size; } @@ -374,12 +374,12 @@ std::optional MergeTreeDataPartWide::getFileNameForColumn(const NameAndT return filename; } -void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const +void MergeTreeDataPartWide::calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const { std::unordered_set processed_substreams; for (const auto & column : columns) { - ColumnSize size = getColumnSizeImpl(column, &processed_substreams); + ColumnSize size = getColumnSizeImpl(column, &processed_substreams, columns_sample); each_columns_size[column.name] = size; total_size.add(size); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWide.h b/src/Storages/MergeTree/MergeTreeDataPartWide.h index 022a5fb746c..a6d4897ed87 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWide.h @@ -64,9 +64,9 @@ private: /// Loads marks index granularity into memory void loadIndexGranularity() override; - ColumnSize getColumnSizeImpl(const NameAndTypePair & column, std::unordered_set * processed_substreams) const; + ColumnSize getColumnSizeImpl(const NameAndTypePair & column, std::unordered_set * processed_substreams, std::optional columns_sample) const; - void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size) const override; + void calculateEachColumnSizes(ColumnSizeByName & each_columns_size, ColumnSize & total_size, std::optional columns_sample) const override; }; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 49d654c15e1..b22d58ba51e 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -123,6 +123,8 @@ public: written_offset_columns = written_offset_columns_; } + Block getColumnsSample() const override { return block_sample; } + protected: /// Count index_granularity for block and store in `index_granularity` size_t computeIndexGranularity(const Block & block) const; diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 77c34aae30a..604b2fda20a 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -207,7 +207,7 @@ MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setBytesOnDisk(checksums.getTotalSizeOnDisk()); new_part->setBytesUncompressedOnDisk(checksums.getTotalSizeUncompressedOnDisk()); new_part->index_granularity = writer->getIndexGranularity(); - new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); + 
new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(writer->getColumnsSample()); /// In mutation, existing_rows_count is already calculated in PartMergerWriter /// In merge situation, lightweight deleted rows was physically deleted, existing_rows_count equals rows_count From 0ff0c96b007108ab222a264e4a3bf8aa7cb7a18e Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 7 Nov 2024 20:01:40 +0000 Subject: [PATCH 095/171] Remove logging --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index f73b52dbafd..4e400fb1f94 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -2527,7 +2527,6 @@ ColumnPtr IMergeTreeDataPart::getColumnSample(const NameAndTypePair & column) co Columns result; result.resize(1); - LOG_DEBUG(getLogger("IMergeTreeDataPart"), "getColumnSample"); reader->readRows(0, total_mark, false, 0, result); return result[0]; } From dd1ca389dbc3b3f5c5f456bc0d070a972acca806 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 8 Nov 2024 10:45:13 +0000 Subject: [PATCH 096/171] Trying to cast filter column. --- src/Processors/QueryPlan/FilterStep.cpp | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 64c46332c34..7613aac618e 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -5,6 +5,8 @@ #include #include #include +#include +#include #include #include #include @@ -46,7 +48,19 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N auto name = filter_node->result_name; auto split_result = dag.split({filter_node}, true); dag = std::move(split_result.second); - split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_result.split_nodes_mapping[filter_node]); + + const auto * split_filter_node = split_result.split_nodes_mapping[filter_node]; + auto filter_type = removeLowCardinality(split_filter_node->result_type); + if (!filter_type->onlyNull() && !isUInt8(removeNullable(filter_type))) + { + DataTypePtr cast_type = std::make_shared(); + if (filter_type->isNullable()) + cast_type = std::make_shared(std::move(cast_type)); + + split_result.first.addCast(*split_filter_node, cast_type, {}); + } + + split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_filter_node); return ActionsAndName{std::move(split_result.first), std::move(name)}; } @@ -168,7 +182,7 @@ void FilterStep::describeActions(FormatSettings & settings) const for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag)); - settings.out << prefix << "AND column: " << and_atom.name; + settings.out << prefix << "AND column: " << and_atom.name << '\n'; expression->describeActions(settings.out, prefix); } From 5275c0a8c44fd3cae1d078411efe42e2f34df437 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Fri, 8 Nov 2024 11:53:46 +0100 Subject: [PATCH 097/171] Reverse order on implicit options --- src/Client/ClientBaseOptimizedParts.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Client/ClientBaseOptimizedParts.cpp b/src/Client/ClientBaseOptimizedParts.cpp index afffe775029..6eaa3708df6 100644 --- a/src/Client/ClientBaseOptimizedParts.cpp +++ b/src/Client/ClientBaseOptimizedParts.cpp @@ -109,8 
+109,8 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de && !op.original_tokens[0].empty() && !op.value.empty()) { /// Two special cases for better usability: - /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// - if the option contains a whitespace, it might be a query: clickhouse "SELECT 1" + /// - if the option is a filesystem file, then it's likely a queries file (clickhouse repro.sql) /// These are relevant for interactive usage - user-friendly, but questionable in general. /// In case of ambiguity or for scripts, prefer using proper options. @@ -119,10 +119,10 @@ void ClientApplicationBase::parseAndCheckOptions(OptionsDescription & options_de const char * option; std::error_code ec; - if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) - option = "queries-file"; - else if (token.contains(' ')) + if (token.contains(' ')) option = "query"; + else if (std::filesystem::is_regular_file(std::filesystem::path{token}, ec)) + option = "queries-file"; else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Positional option `{}` is not supported.", token); From 164e3c26677a209bc7990d326869e71eb3be3bef Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 11:54:43 +0100 Subject: [PATCH 098/171] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 64964f294bd..efa47302343 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -74,6 +74,7 @@ static std::initializer_list Date: Fri, 8 Nov 2024 11:38:43 +0000 Subject: [PATCH 099/171] Small refactor to ease debugging when something happens on the CI --- src/Interpreters/QueryMetricLog.cpp | 13 ++++++------- .../03203_system_query_metric_log.reference | 6 +++--- .../0_stateless/03203_system_query_metric_log.sh | 6 +++--- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index e784c357b29..4fbe4f9e1b5 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -148,7 +148,7 @@ void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, U { QueryMetricLogStatus query_status; query_status.interval_milliseconds = interval_milliseconds; - query_status.next_collect_time = start_time + std::chrono::milliseconds(interval_milliseconds); + query_status.next_collect_time = start_time; auto context = getContext(); const auto & process_list = context->getProcessList(); @@ -213,6 +213,7 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, void QueryMetricLogStatus::scheduleNext(String query_id) { + next_collect_time += std::chrono::milliseconds(interval_milliseconds); const auto now = std::chrono::system_clock::now(); if (next_collect_time > now) { @@ -229,8 +230,9 @@ void QueryMetricLogStatus::scheduleNext(String query_id) std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. 
Schedule next: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next); + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { @@ -276,15 +278,12 @@ std::optional QueryMetricLogStatus::createLogMetricElemen } else { - LOG_TRACE(logger, "Query {} has no profile counters", query_id); + LOG_WARNING(logger, "Query {} has no profile counters", query_id); elem.profile_events = std::vector(ProfileEvents::end()); } if (schedule_next) - { - next_collect_time += std::chrono::milliseconds(interval_milliseconds); scheduleNext(query_id); - } return elem; } diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.reference b/tests/queries/0_stateless/03203_system_query_metric_log.reference index 940b0c4e178..fa8e27a7e90 100644 --- a/tests/queries/0_stateless/03203_system_query_metric_log.reference +++ b/tests/queries/0_stateless/03203_system_query_metric_log.reference @@ -23,8 +23,8 @@ --Interval 123: check that the SleepFunctionCalls, SleepFunctionMilliseconds and ProfileEvent_SleepFunctionElapsedMicroseconds are correct 1 --Check that a query_metric_log_interval=0 disables the collection -0 +1 -Check that a query which execution time is less than query_metric_log_interval is never collected -0 +1 --Check that there is a final event when queries finish -3 +1 diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index bf94be79d7c..abcd14c8e5d 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -84,17 +84,17 @@ check_log 123 # query_metric_log_interval=0 disables the collection altogether $CLICKHOUSE_CLIENT -m -q """ SELECT '--Check that a query_metric_log_interval=0 disables the collection'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_0' + SELECT count() == 0 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_0' """ # a quick query that takes less than query_metric_log_interval is never collected $CLICKHOUSE_CLIENT -m -q """ SELECT '-Check that a query which execution time is less than query_metric_log_interval is never collected'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_fast' + SELECT count() == 0 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_fast' """ # a query that takes more than query_metric_log_interval is collected including the final row $CLICKHOUSE_CLIENT -m -q """ SELECT '--Check that there is a final event when queries finish'; - SELECT count() FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_1000' + SELECT count() > 2 FROM system.query_metric_log WHERE event_date >= yesterday() AND query_id = '${query_prefix}_1000' """ From 0dbc041d8bc49d2760fe85a8a76431395571dfb8 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 8 Nov 2024 12:00:34 +0000 Subject: [PATCH 100/171] Log when the query finishes for system.query_metric_log ASAP There are logs where we can see that after the query finishes, executeQuery 
takes up to 2 seconds to call finishQuery. --- src/Interpreters/executeQuery.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 9250c069283..4507126b7b3 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -506,6 +506,7 @@ void logQueryFinish( auto time_now = std::chrono::system_clock::now(); QueryStatusInfo info = process_list_elem->getInfo(true, settings[Setting::log_profile_events]); + logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); elem.type = QueryLogElementType::QUERY_FINISH; addStatusInfoToQueryLogElement(elem, info, query_ast, context); @@ -551,6 +552,7 @@ void logQueryFinish( if (auto query_log = context->getQueryLog()) query_log->add(elem); } + if (settings[Setting::log_processors_profiles]) { if (auto processors_profile_log = context->getProcessorsProfileLog()) @@ -598,8 +600,6 @@ void logQueryFinish( } } } - - logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); } if (query_span) @@ -669,6 +669,7 @@ void logQueryException( { elem.query_duration_ms = start_watch.elapsedMilliseconds(); } + logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, info); elem.query_cache_usage = QueryCache::Usage::None; @@ -698,8 +699,6 @@ void logQueryException( query_span->addAttribute("clickhouse.exception_code", elem.exception_code); query_span->finish(); } - - logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, info); } void logExceptionBeforeStart( @@ -753,6 +752,8 @@ void logExceptionBeforeStart( elem.client_info = context->getClientInfo(); + logQueryMetricLogFinish(context, false, elem.client_info.current_query_id, std::chrono::system_clock::now(), nullptr); + elem.log_comment = settings[Setting::log_comment]; if (elem.log_comment.size() > settings[Setting::max_query_size]) elem.log_comment.resize(settings[Setting::max_query_size]); @@ -797,8 +798,6 @@ void logExceptionBeforeStart( ProfileEvents::increment(ProfileEvents::FailedInsertQuery); } } - - logQueryMetricLogFinish(context, false, elem.client_info.current_query_id, std::chrono::system_clock::now(), nullptr); } void validateAnalyzerSettings(ASTPtr ast, bool context_value) From 1dcd06f098fbd661d1327cd9ecdabd32f67831ce Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Fri, 8 Nov 2024 13:11:36 +0100 Subject: [PATCH 101/171] squash! 
Missing tests in several tests in 24.10

Added corner cases for tests for:
to_utc_timestamp and from_utc_timestamp (more timezones, special timezones;
epoch corner cases do not look right, raising a bug for that)
arrayUnion (empty and big arrays)
quantilesExactWeightedInterpolated (more data types)
---
 tests/queries/0_stateless/02812_from_to_utc_timestamp.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
index 20ae224332c..9eb4484ace0 100755
--- a/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
+++ b/tests/queries/0_stateless/02812_from_to_utc_timestamp.sh
@@ -16,6 +16,7 @@ $CLICKHOUSE_CLIENT -q "select x, to_utc_timestamp(toDateTime('2023-03-16 11:22:3
 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid')"
 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid')"
 $CLICKHOUSE_CLIENT -q "select to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST')"
+
 $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST')"
 $CLICKHOUSE_CLIENT -q "select 'leap year:', to_utc_timestamp(toDateTime('2024-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-03-01 00:22:33'), 'EST')"

From 6f74b3236bef52beed01aca5007dad13df7a5ae4 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Fri, 8 Nov 2024 12:22:57 +0000
Subject: [PATCH 102/171] Fix some tests.
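Update expected results for the FilterStep short-circuit change: splitting an
AND chain into separate filter steps renumbers the temporary filter columns in
EXPLAIN actions output and lets adjacent FilterTransforms be merged in the
pipeline, so the plan and pipeline references below change accordingly.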
--- src/Core/SettingsChangesHistory.cpp | 1 - src/Processors/QueryPlan/FilterStep.cpp | 4 +- .../01655_plan_optimizations.reference | 5 +-- .../0_stateless/01655_plan_optimizations.sh | 4 +- .../02496_remove_redundant_sorting.reference | 13 +++---- ...rouping_sets_predicate_push_down.reference | 38 ++++++++----------- 6 files changed, 27 insertions(+), 38 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index dedf8279533..8f01bacf254 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,7 +75,6 @@ static std::initializer_listresult_name; auto split_result = dag.split({filter_node}, true); dag = std::move(split_result.second); @@ -57,10 +56,11 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N if (filter_type->isNullable()) cast_type = std::make_shared(std::move(cast_type)); - split_result.first.addCast(*split_filter_node, cast_type, {}); + split_filter_node = &split_result.first.addCast(*split_filter_node, cast_type, {}); } split_result.first.getOutputs().emplace(split_result.first.getOutputs().begin(), split_filter_node); + auto name = split_filter_node->result_name; return ActionsAndName{std::move(split_result.first), std::move(name)}; } diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index edf93b4b39f..7fc7556e85b 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -82,12 +82,12 @@ Filter column: notEquals(__table1.y, 0_UInt8) 9 10 > one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column -FUNCTION and(minus(s, 8) :: 5, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) +FUNCTION and(minus(s, 8) :: 3, minus(s, 4) :: 5) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) > (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column -FUNCTION and(minus(__table1.s, 8_UInt8) :: 1, minus(__table1.s, 4_UInt8) :: 2) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8)) +FUNCTION and(minus(__table1.s, 8_UInt8) :: 3, minus(__table1.s, 4_UInt8) :: 5) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8)) Aggregating Filter column: notEquals(__table1.y, 0_UInt8) 0 1 @@ -163,7 +163,6 @@ Filter column: notEquals(__table1.y, 2_UInt8) > filter is pushed down before CreatingSets CreatingSets Filter -Filter 1 3 > one condition of filter is pushed down before LEFT JOIN diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 42cdac8c01f..04ab9bbd11c 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -89,14 +89,14 @@ $CLICKHOUSE_CLIENT --enable_analyzer=0 --convert_query_to_cnf=0 -q " select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 8) :: 5, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 
0)\|FUNCTION and(minus(s, 8) :: 3, minus(s, 4) :: 5) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" echo "> (analyzer) one condition of filter should be pushed down after aggregating, other two conditions are ANDed" $CLICKHOUSE_CLIENT --enable_analyzer=1 --convert_query_to_cnf=0 -q " explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: notEquals(__table1.y, 0_UInt8)\|FUNCTION and(minus(__table1.s, 8_UInt8) :: 1, minus(__table1.s, 4_UInt8) :: 2) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8))" + grep -o "Aggregating\|Filter column\|Filter column: notEquals(__table1.y, 0_UInt8)\|FUNCTION and(minus(__table1.s, 8_UInt8) :: 3, minus(__table1.s, 4_UInt8) :: 5) -> and(notEquals(__table1.y, 0_UInt8), minus(__table1.s, 8_UInt8), minus(__table1.s, 4_UInt8))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index 7824fd8cba9..00db41e8ac5 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -332,13 +332,12 @@ SETTINGS optimize_aggregators_of_group_by_keys=0 -- avoid removing any() as it d Expression (Projection) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - Filter ((WHERE + (Projection + Before ORDER BY))) - Filter (HAVING) - Aggregating - Expression ((Before GROUP BY + Projection)) - Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + (Projection + Before ORDER BY))) - ReadFromSystemNumbers + Filter (((WHERE + (Projection + Before ORDER BY)) + HAVING)) + Aggregating + Expression ((Before GROUP BY + Projection)) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromSystemNumbers -- execute 1 2 diff --git a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference index 9bb0c022752..a382e14ce03 100644 --- a/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference +++ b/tests/queries/0_stateless/02554_fix_grouping_sets_predicate_push_down.reference @@ -28,21 +28,17 @@ WHERE type_1 = \'all\' (Expression) ExpressionTransform × 2 (Filter) - FilterTransform × 2 - (Filter) - FilterTransform × 2 - (Filter) - FilterTransform × 2 - (Aggregating) - ExpressionTransform × 2 - AggregatingTransform × 2 - Copy 1 → 2 - (Expression) - ExpressionTransform - (Expression) - ExpressionTransform - (ReadFromMergeTree) - MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + FilterTransform × 6 + (Aggregating) + ExpressionTransform × 2 + AggregatingTransform × 2 + Copy 1 → 2 + (Expression) + ExpressionTransform + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 (Expression) ExpressionTransform × 2 (Filter) @@ -68,14 +64,10 @@ ExpressionTransform × 2 ExpressionTransform × 2 AggregatingTransform × 2 Copy 1 → 2 - (Filter) - FilterTransform - (Filter) - FilterTransform - (Expression) - ExpressionTransform - (ReadFromMergeTree) - 
MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 + (Expression) + ExpressionTransform + (ReadFromMergeTree) + MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder) 0 → 1 (Expression) ExpressionTransform × 2 (Aggregating) From 955f537bd5ef2f4a29717ac4999ce2af47b4c039 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Fri, 8 Nov 2024 12:28:06 +0000 Subject: [PATCH 103/171] Add a new setting query_metric_log_debug to avoid the noise --- src/Core/Settings.cpp | 5 +++++ src/Core/SettingsChangesHistory.cpp | 1 + src/Interpreters/QueryMetricLog.cpp | 18 +++++++++++------- src/Interpreters/QueryMetricLog.h | 3 ++- src/Interpreters/executeQuery.cpp | 3 ++- .../03203_system_query_metric_log.sh | 10 +++++----- 6 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..d07cd7352a1 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2784,6 +2784,11 @@ If set to any negative value, it will take the value `collect_interval_milliseco To disable the collection of a single query, set `query_metric_log_interval` to 0. Default value: -1 + )", 0) \ + DECLARE(Bool, query_metric_log_debug, false, R"( +Turns on debugging traces for system.query_metric_log + +Default value: false )", 0) \ DECLARE(LogsLevel, send_logs_level, LogsLevel::fatal, R"( Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none' diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ed87fde8b7e..a3e21aa670f 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,6 +75,7 @@ static std::initializer_listgetProcessList(); @@ -222,21 +223,24 @@ void QueryMetricLogStatus::scheduleNext(String query_id) } else { - LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", - query_id, timePointToString(next_collect_time)); + if (debug_traces) + LOG_DEBUG(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); task->schedule(); } } std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), - schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); + if (debug_traces) + LOG_DEBUG(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { - LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); + if (debug_traces) + LOG_DEBUG(logger, "Query {} has a more recent metrics collected. 
Skipping this one", query_id); return {}; } diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 65764229b0a..5f301b2cd13 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -51,6 +51,7 @@ struct QueryMetricLogStatus std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); + bool debug_traces = false; /// We need to be able to move it for the hash map, so we need to add an indirection here. std::unique_ptr mutex = std::make_unique(); @@ -78,7 +79,7 @@ public: void shutdown() final; /// Both startQuery and finishQuery are called from the thread that executes the query. - void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); + void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds, bool debug_traces = false); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 4507126b7b3..794d3dab0e6 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -146,6 +146,7 @@ namespace Setting extern const SettingsQueryCacheSystemTableHandling query_cache_system_table_handling; extern const SettingsSeconds query_cache_ttl; extern const SettingsInt64 query_metric_log_interval; + extern const SettingsBool query_metric_log_debug; extern const SettingsOverflowMode read_overflow_mode; extern const SettingsOverflowMode read_overflow_mode_leaf; extern const SettingsOverflowMode result_overflow_mode; @@ -455,7 +456,7 @@ QueryLogElement logQueryStart( { auto interval_milliseconds = getQueryMetricLogInterval(context); if (interval_milliseconds > 0) - query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds); + query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds, settings[Setting::query_metric_log_debug]); } return elem; diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index abcd14c8e5d..4bc764b777c 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -6,11 +6,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) readonly query_prefix=$CLICKHOUSE_DATABASE -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=400 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=123 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=0 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=400 FORMAT Null" & +$CLICKHOUSE_CLIENT 
--query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=123 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=0 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) SETTINGS query_metric_log_debug=true FORMAT Null" & wait From fd9f32708371246e36b289164cf402230bc860c6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 13:49:08 +0100 Subject: [PATCH 104/171] Allow to disable memory buffer increase for filesystem cache --- src/Core/Settings.cpp | 3 +++ src/Disks/ObjectStorages/DiskObjectStorage.cpp | 5 ++++- src/IO/ReadSettings.h | 1 + src/Interpreters/Context.cpp | 2 ++ src/Storages/ObjectStorage/StorageObjectStorageSource.cpp | 2 +- 5 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 6f0109fa300..9a821879c5b 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4872,6 +4872,9 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ DECLARE(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which detiriorate cache performance )", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache diff --git a/src/Disks/ObjectStorages/DiskObjectStorage.cpp b/src/Disks/ObjectStorages/DiskObjectStorage.cpp index 3720c04a471..fba45d5a0c9 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorage.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorage.cpp @@ -642,7 +642,10 @@ std::unique_ptr DiskObjectStorage::readFile( }; /// Avoid cache fragmentation by choosing bigger buffer size. - bool prefer_bigger_buffer_size = object_storage->supportsCache() && read_settings.enable_filesystem_cache; + bool prefer_bigger_buffer_size = read_settings.filesystem_cache_prefer_bigger_buffer_size + && object_storage->supportsCache() + && read_settings.enable_filesystem_cache; + size_t buffer_size = prefer_bigger_buffer_size ? 
std::max(settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) : settings.remote_fs_buffer_size; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 6ed02212095..c1747314c76 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -61,6 +61,7 @@ struct ReadSettings bool filesystem_cache_allow_background_download = true; bool filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = true; bool filesystem_cache_allow_background_download_during_fetch = true; + bool filesystem_cache_prefer_bigger_buffer_size = true; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index c1fa2c8549a..d42002bf98d 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -196,6 +196,7 @@ namespace Setting extern const SettingsUInt64 filesystem_cache_segments_batch_size; extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage; extern const SettingsBool filesystem_cache_enable_background_download_during_fetch; + extern const SettingsBool filesystem_cache_prefer_bigger_buffer_size; extern const SettingsBool http_make_head_request; extern const SettingsUInt64 http_max_fields; extern const SettingsUInt64 http_max_field_name_size; @@ -5751,6 +5752,7 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage]; res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; + res.filesystem_cache_prefer_bigger_buffer_size = settings_ref[Setting::filesystem_cache_prefer_bigger_buffer_size]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 563bdc44760..1ccf23ade90 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -517,7 +517,7 @@ std::unique_ptr StorageObjectStorageSource::createReadBu LOG_TRACE(log, "Downloading object of size {} with initial prefetch", object_size); - bool prefer_bigger_buffer_size = impl->isCached(); + bool prefer_bigger_buffer_size = read_settings.filesystem_cache_prefer_bigger_buffer_size && impl->isCached(); size_t buffer_size = prefer_bigger_buffer_size ? 
std::max(read_settings.remote_fs_buffer_size, DBMS_DEFAULT_BUFFER_SIZE) : read_settings.remote_fs_buffer_size; From fe73c1880a67340b8eea8c7d27a4f0a58aa42cd9 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 14:06:59 +0100 Subject: [PATCH 105/171] Update src/Core/Settings.cpp Co-authored-by: Nikita Taranov --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 9a821879c5b..8feb758df0f 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4874,7 +4874,7 @@ Limit on size of a single batch of file segments that a read buffer can request Wait time to lock cache for space reservation in filesystem cache )", 0) \ DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( -Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which detiriorate cache performance +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance )", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache From aeb2cbf934c76d082b01dc023b28562efb5d6e02 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:26:41 +0100 Subject: [PATCH 106/171] Update Settings.cpp --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 8feb758df0f..07a2c52d72f 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4874,7 +4874,7 @@ Limit on size of a single batch of file segments that a read buffer can request Wait time to lock cache for space reservation in filesystem cache )", 0) \ DECLARE(Bool, filesystem_cache_prefer_bigger_buffer_size, true, R"( -Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance +Prefer bigger buffer size if filesystem cache is enabled to avoid writing small file segments which deteriorate cache performance. On the other hand, enabling this setting might increase memory usage. 
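For illustration, a memory-constrained query could opt out per query; a minimal sketch, assuming a server built with this patch and a cache-backed object-storage disk (the table name is hypothetical):

```sql
-- Hypothetical table stored on a cached object-storage disk.
SELECT count()
FROM events_on_s3
SETTINGS
    enable_filesystem_cache = 1,
    filesystem_cache_prefer_bigger_buffer_size = 0;  -- keep remote_fs_buffer_size as-is
```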
)", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache From 0929f66516261dea7b31479b8f5eaac1b4b8e38a Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 8 Nov 2024 17:01:09 +0100 Subject: [PATCH 107/171] Update test --- tests/integration/test_storage_s3_queue/test.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 284b304c632..62afc0b1c1d 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1000,6 +1000,9 @@ def test_max_set_age(started_cluster): assert "Cannot parse input" in node.query( f"SELECT exception FROM system.s3queue WHERE file_name ilike '%{file_with_error}'" ) + assert "Cannot parse input" in node.query( + f"SELECT exception FROM system.s3queue_log WHERE file_name ilike '%{file_with_error}' ORDER BY processing_end_time DESC LIMIT 1" + ) assert 1 == int( node.query( From bf58f468082917f871dc706f8596020d0364b43e Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Sat, 9 Nov 2024 13:04:39 +0800 Subject: [PATCH 108/171] Fix empty tuple ALTER --- src/Functions/FunctionsComparison.h | 3 +++ src/Functions/if.cpp | 3 +++ .../0_stateless/03268_empty_tuple_update.reference | 1 + .../queries/0_stateless/03268_empty_tuple_update.sql | 11 +++++++++++ 4 files changed, 18 insertions(+) create mode 100644 tests/queries/0_stateless/03268_empty_tuple_update.reference create mode 100644 tests/queries/0_stateless/03268_empty_tuple_update.sql diff --git a/src/Functions/FunctionsComparison.h b/src/Functions/FunctionsComparison.h index be0875581a5..9b2328065fc 100644 --- a/src/Functions/FunctionsComparison.h +++ b/src/Functions/FunctionsComparison.h @@ -1033,6 +1033,9 @@ private: size_t tuple_size, size_t input_rows_count) const { + if (0 == tuple_size) + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Comparison of zero-sized tuples is not implemented"); + ColumnsWithTypeAndName less_columns(tuple_size); ColumnsWithTypeAndName equal_columns(tuple_size - 1); ColumnsWithTypeAndName tmp_columns(2); diff --git a/src/Functions/if.cpp b/src/Functions/if.cpp index e03b27b3c39..5e1e7067e86 100644 --- a/src/Functions/if.cpp +++ b/src/Functions/if.cpp @@ -668,6 +668,9 @@ private: temporary_columns[0] = arguments[0]; size_t tuple_size = type1.getElements().size(); + if (tuple_size == 0) + return ColumnTuple::create(input_rows_count); + Columns tuple_columns(tuple_size); for (size_t i = 0; i < tuple_size; ++i) diff --git a/tests/queries/0_stateless/03268_empty_tuple_update.reference b/tests/queries/0_stateless/03268_empty_tuple_update.reference new file mode 100644 index 00000000000..30bc45d7a18 --- /dev/null +++ b/tests/queries/0_stateless/03268_empty_tuple_update.reference @@ -0,0 +1 @@ +() 2 diff --git a/tests/queries/0_stateless/03268_empty_tuple_update.sql b/tests/queries/0_stateless/03268_empty_tuple_update.sql new file mode 100644 index 00000000000..343117719fc --- /dev/null +++ b/tests/queries/0_stateless/03268_empty_tuple_update.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(), c1 int) ENGINE = Memory(); + +INSERT INTO t0 VALUES ((), 1); + +ALTER TABLE t0 UPDATE c0 = (), c1 = 2 WHERE EXISTS (SELECT 1); + +SELECT * FROM t0; + +DROP TABLE t0; From a888db338e1c79166a3ff6993b71d2fd17dc8736 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Sat, 9 Nov 2024 08:23:25 +0100 
Subject: [PATCH 109/171] Revert "Add a new setting query_metric_log_debug to avoid the noise" This reverts commit 955f537bd5ef2f4a29717ac4999ce2af47b4c039. --- src/Core/Settings.cpp | 5 ----- src/Core/SettingsChangesHistory.cpp | 1 - src/Interpreters/QueryMetricLog.cpp | 18 +++++++----------- src/Interpreters/QueryMetricLog.h | 3 +-- src/Interpreters/executeQuery.cpp | 3 +-- .../03203_system_query_metric_log.sh | 10 +++++----- 6 files changed, 14 insertions(+), 26 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 2677bde4d55..6f0109fa300 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2787,11 +2787,6 @@ If set to any negative value, it will take the value `collect_interval_milliseco To disable the collection of a single query, set `query_metric_log_interval` to 0. Default value: -1 - )", 0) \ - DECLARE(Bool, query_metric_log_debug, false, R"( -Turns on debugging traces for system.query_metric_log - -Default value: false )", 0) \ DECLARE(LogsLevel, send_logs_level, LogsLevel::fatal, R"( Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none' diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 008980aae11..c6223bef2b2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -76,7 +76,6 @@ static std::initializer_listgetProcessList(); @@ -223,24 +222,21 @@ void QueryMetricLogStatus::scheduleNext(String query_id) } else { - if (debug_traces) - LOG_DEBUG(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", - query_id, timePointToString(next_collect_time)); + LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. Scheduling it right now", + query_id, timePointToString(next_collect_time)); task->schedule(); } } std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { - if (debug_traces) - LOG_DEBUG(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), - schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); + LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", + query_id, interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { - if (debug_traces) - LOG_DEBUG(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); + LOG_TRACE(logger, "Query {} has a more recent metrics collected. 
Skipping this one", query_id); return {}; } diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 5f301b2cd13..65764229b0a 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -51,7 +51,6 @@ struct QueryMetricLogStatus std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); - bool debug_traces = false; /// We need to be able to move it for the hash map, so we need to add an indirection here. std::unique_ptr mutex = std::make_unique(); @@ -79,7 +78,7 @@ public: void shutdown() final; /// Both startQuery and finishQuery are called from the thread that executes the query. - void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds, bool debug_traces = false); + void startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds); void finishQuery(const String & query_id, TimePoint finish_time, QueryStatusInfoPtr query_info = nullptr); private: diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 794d3dab0e6..4507126b7b3 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -146,7 +146,6 @@ namespace Setting extern const SettingsQueryCacheSystemTableHandling query_cache_system_table_handling; extern const SettingsSeconds query_cache_ttl; extern const SettingsInt64 query_metric_log_interval; - extern const SettingsBool query_metric_log_debug; extern const SettingsOverflowMode read_overflow_mode; extern const SettingsOverflowMode read_overflow_mode_leaf; extern const SettingsOverflowMode result_overflow_mode; @@ -456,7 +455,7 @@ QueryLogElement logQueryStart( { auto interval_milliseconds = getQueryMetricLogInterval(context); if (interval_milliseconds > 0) - query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds, settings[Setting::query_metric_log_debug]); + query_metric_log->startQuery(elem.client_info.current_query_id, query_start_time, interval_milliseconds); } return elem; diff --git a/tests/queries/0_stateless/03203_system_query_metric_log.sh b/tests/queries/0_stateless/03203_system_query_metric_log.sh index 4bc764b777c..abcd14c8e5d 100755 --- a/tests/queries/0_stateless/03203_system_query_metric_log.sh +++ b/tests/queries/0_stateless/03203_system_query_metric_log.sh @@ -6,11 +6,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) readonly query_prefix=$CLICKHOUSE_DATABASE -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=400 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=123 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_debug=true, query_metric_log_interval=0 FORMAT Null" & -$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) SETTINGS query_metric_log_debug=true FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_1000" -q "SELECT sleep(2.5) FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_400" -q "SELECT sleep(2.5) 
SETTINGS query_metric_log_interval=400 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_123" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=123 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_0" -q "SELECT sleep(2.5) SETTINGS query_metric_log_interval=0 FORMAT Null" & +$CLICKHOUSE_CLIENT --query-id="${query_prefix}_fast" -q "SELECT sleep(0.1) FORMAT Null" & wait From 516300e733c8b3a116139a0306797f779b818f56 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Sat, 9 Nov 2024 08:28:47 +0100 Subject: [PATCH 110/171] Demote log from warning to debug to avoid failing the test --- src/Interpreters/QueryMetricLog.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index 4fbe4f9e1b5..62700f49605 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -107,6 +107,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer const auto query_info = process_list.getQueryInfo(query_id, false, true, false); if (!query_info) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} is not running anymore, so we couldn't get its QueryStatusInfo", query_id); return; } @@ -118,6 +119,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer if (it == queries.end()) { global_lock.unlock(); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} not found in the list. Finished while this collecting task was running", query_id); return; } @@ -126,6 +128,7 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer if (!query_status.mutex) { global_lock.unlock(); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} finished while this collecting task was running", query_id); return; } @@ -230,12 +233,14 @@ void QueryMetricLogStatus::scheduleNext(String query_id) std::optional QueryMetricLogStatus::createLogMetricElement(const String & query_id, const QueryStatusInfo & query_info, TimePoint query_info_time, bool schedule_next) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", query_id, interval_milliseconds, timePointToString(query_info_time), schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); if (query_info_time <= last_collect_time) { + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} has a more recent metrics collected. 
Skipping this one", query_id); return {}; } @@ -278,7 +283,8 @@ std::optional QueryMetricLogStatus::createLogMetricElemen } else { - LOG_WARNING(logger, "Query {} has no profile counters", query_id); + /// TODO: remove trace before 24.11 release after checking everything is fine on the CI + LOG_DEBUG(logger, "Query {} has no profile counters", query_id); elem.profile_events = std::vector(ProfileEvents::end()); } From 55540c2119ca0dbc5d4eb51763155f27883df0b9 Mon Sep 17 00:00:00 2001 From: Eduard Karacharov Date: Sun, 10 Nov 2024 11:18:12 +0200 Subject: [PATCH 111/171] fix: transform set constant only if allowed --- src/Storages/MergeTree/KeyCondition.cpp | 7 +- src/Storages/MergeTree/KeyCondition.h | 1 + .../03269_partition_key_not_in_set.reference | 13 +++ .../03269_partition_key_not_in_set.sql | 81 +++++++++++++++++++ 4 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03269_partition_key_not_in_set.reference create mode 100644 tests/queries/0_stateless/03269_partition_key_not_in_set.sql diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index 17723d341fb..a2783ff4efe 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1158,6 +1158,7 @@ bool KeyCondition::tryPrepareSetIndex( const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num, + bool & allow_constant_transformation, bool & is_constant_transformed) { const auto & left_arg = func.getArgumentAt(0); @@ -1184,7 +1185,9 @@ bool KeyCondition::tryPrepareSetIndex( set_transforming_chains.push_back(set_transforming_chain); } // For partition index, checking if set can be transformed to prune any partitions - else if (single_point && canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain)) + else if ( + single_point && allow_constant_transformation + && canSetValuesBeWrappedByFunctions(node, index_mapping.key_index, data_type, set_transforming_chain)) { indexes_mapping.push_back(index_mapping); data_types.push_back(data_type); @@ -1954,7 +1957,7 @@ bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNEleme if (functionIsInOrGlobalInOperator(func_name)) { - if (tryPrepareSetIndex(func, out, key_column_num, is_constant_transformed)) + if (tryPrepareSetIndex(func, out, key_column_num, allow_constant_transformation, is_constant_transformed)) { key_arg_pos = 0; is_set_const = true; diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 8c946bd3bbd..20b40271dc2 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -312,6 +312,7 @@ private: const RPNBuilderFunctionTreeNode & func, RPNElement & out, size_t & out_key_column_num, + bool & allow_constant_transformation, bool & is_constant_transformed); /// Checks that the index can not be used. 
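The tests added below pin down the intended behavior; a hedged sketch of how the difference can be observed, using one of the tables defined in the new test below (`EXPLAIN indexes = 1` is the standard way to inspect index use):

```sql
-- With PARTITION BY intDiv(id, 10), the partition holding id = 6 also holds
-- id = 5, 7, ..., so it must not be pruned for a NOT IN predicate.
EXPLAIN indexes = 1
SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (6, 38);
-- Expected after the fix: the partition/MinMax index keeps all parts.
```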
diff --git a/tests/queries/0_stateless/03269_partition_key_not_in_set.reference b/tests/queries/0_stateless/03269_partition_key_not_in_set.reference new file mode 100644 index 00000000000..1e34df0c77e --- /dev/null +++ b/tests/queries/0_stateless/03269_partition_key_not_in_set.reference @@ -0,0 +1,13 @@ +-- Monotonic function in partition key +48 +48 +-- Non-monotonic function in partition key +48 +48 +-- Multiple partition columns +50 +50 +96 +96 +98 +98 diff --git a/tests/queries/0_stateless/03269_partition_key_not_in_set.sql b/tests/queries/0_stateless/03269_partition_key_not_in_set.sql new file mode 100644 index 00000000000..562521fb7ee --- /dev/null +++ b/tests/queries/0_stateless/03269_partition_key_not_in_set.sql @@ -0,0 +1,81 @@ +-- Related to https://github.com/ClickHouse/ClickHouse/issues/69829 +-- +-- The main goal of the test is to assert that constant transformation +-- for set constant while partition pruning won't be performed +-- if it's not allowed (NOT IN operator case) + +DROP TABLE IF EXISTS 03269_filters; +CREATE TABLE 03269_filters ( + id Int32, + dt Date +) +engine = MergeTree +order by id; + +INSERT INTO 03269_filters +SELECT 6, '2020-01-01' +UNION ALL +SELECT 38, '2021-01-01'; + +SELECT '-- Monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_monotonic; +CREATE TABLE 03269_single_monotonic( + id Int32 +) +ENGINE = MergeTree +PARTITION BY intDiv(id, 10) +ORDER BY id; + +INSERT INTO 03269_single_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN ( + SELECT id FROM 03269_filters +); + +DROP TABLE 03269_single_monotonic; + +SELECT '-- Non-monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_non_monotonic; +CREATE TABLE 03269_single_non_monotonic ( + id Int32 +) +ENGINE = MergeTree +PARTITION BY id % 10 +ORDER BY id; + +INSERT INTO 03269_single_non_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (SELECT id FROM 03269_filters); + +DROP TABLE 03269_single_non_monotonic; + +SELECT '-- Multiple partition columns'; + +DROP TABLE IF EXISTS 03269_multiple_part_cols; +CREATE TABLE 03269_multiple_part_cols ( + id Int32, + dt Date, +) +ENGINE = MergeTree +PARTITION BY (dt, intDiv(id, 10)) +ORDER BY id; + +INSERT INTO 03269_multiple_part_cols +SELECT number, '2020-01-01' FROM numbers(50) +UNION ALL +SELECT number, '2021-01-01' FROM numbers(50); + +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN ('2020-01-01'); +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN (SELECT dt FROM 03269_filters WHERE dt < '2021-01-01'); + +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (SELECT id FROM 03269_filters); + +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN ((6, '2020-01-01'), (38, '2021-01-01')); +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN (SELECT id, dt FROM 03269_filters); + +DROP TABLE 03269_multiple_part_cols; From d1e638da6e65a2f0de4aa72b78fd894c090606de Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Sun, 10 Nov 2024 15:12:21 +0100 Subject: [PATCH 112/171] Let's name cherry-pick branches the same way as backports --- tests/ci/cherry_pick.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index a796f63de6c..9bdc184f661 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -97,7 +97,7 @@ close it. self.pr = pr self.repo = repo - self.cherrypick_branch = f"cherrypick/{name}/{pr.merge_commit_sha}" + self.cherrypick_branch = f"cherrypick/{name}/{pr.number}" self.backport_branch = f"backport/{name}/{pr.number}" self.cherrypick_pr = None # type: Optional[PullRequest] self.backport_pr = None # type: Optional[PullRequest] From f9fa5ed515daaf6bb30ed13fd882a4c92cb84351 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 20:38:51 +0000 Subject: [PATCH 113/171] Docs: Steps to populate TPC-H tables from S3 --- .../getting-started/example-datasets/tpch.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index 379b92cbe9a..c0bf54a5a7c 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -166,10 +166,26 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl ``` -The queries are generated by `./qgen -s `. Example queries for `s = 100`: +:::note +Instead of using tpch-kit and generating the tables by yourself, you can alternatively import the data from a public S3 bucket. Make sure +to create empty tables first using above `CREATE` statements. 
+ +```sql +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +```` +::: ## Queries +The queries are generated by `./qgen -s `. Example queries for `s = 100`: + **Correctness** The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale From 892d43bd7d57faed05cdcef77e684a09dbad3e36 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 10 Nov 2024 20:50:07 +0000 Subject: [PATCH 114/171] SF 1 vs. 100 --- .../getting-started/example-datasets/tpch.md | 27 +++++++++++++------ 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md index c0bf54a5a7c..de2c425b402 100644 --- a/docs/en/getting-started/example-datasets/tpch.md +++ b/docs/en/getting-started/example-datasets/tpch.md @@ -171,14 +171,25 @@ Instead of using tpch-kit and generating the tables by yourself, you can alterna to create empty tables first using above `CREATE` statements. 
```sql -INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; -INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter='|', input_format_defaults_for_omitted_fields=1, input_format_csv_empty_as_default=1; +-- Scaling factor 1 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, 
input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; + +-- Scaling factor 100 +INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; +INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1; ```` ::: From a74f491df3c217bf4132b08118e4708b05d3bf60 Mon Sep 17 00:00:00 2001 From: Shaun Struwig <41984034+Blargian@users.noreply.github.com> Date: Sun, 10 Nov 2024 22:02:01 +0100 Subject: [PATCH 115/171] Fix typo --- docs/en/sql-reference/functions/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/functions/index.md b/docs/en/sql-reference/functions/index.md index c0256ba4735..04a87c369ab 100644 --- a/docs/en/sql-reference/functions/index.md +++ b/docs/en/sql-reference/functions/index.md @@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul ## Types of Results -All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. +All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function. 
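A short illustration of the exception mentioned above, where the result *type* depends on an argument's *value* (works on any recent ClickHouse server):

```sql
SELECT toTypeName(toFixedString('ab', 3));     -- FixedString(3): the type follows the value 3
SELECT toTypeName(tupleElement((1, 'a'), 2));  -- String: the element is picked by the value 2
```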
## Constants From 866e4daeecb301030e3f89eb56395c1156cb840d Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:10:49 +0100 Subject: [PATCH 116/171] Update index.md --- .../aggregate-functions/reference/index.md | 174 +++++++++--------- 1 file changed, 85 insertions(+), 89 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md index 2dce0afe2e1..d7b287f764b 100644 --- a/docs/en/sql-reference/aggregate-functions/reference/index.md +++ b/docs/en/sql-reference/aggregate-functions/reference/index.md @@ -7,119 +7,115 @@ toc_hidden: true # List of Aggregate Functions -Standard aggregate functions: - -- [count](../reference/count.md) -- [min](../reference/min.md) -- [max](../reference/max.md) -- [sum](../reference/sum.md) -- [avg](../reference/avg.md) -- [any](../reference/any.md) -- [stddevPop](../reference/stddevpop.md) -- [stddevPopStable](../reference/stddevpopstable.md) -- [stddevSamp](../reference/stddevsamp.md) -- [stddevSampStable](../reference/stddevsampstable.md) -- [varPop](../reference/varpop.md) -- [varSamp](../reference/varsamp.md) -- [corr](../reference/corr.md) -- [corr](../reference/corrstable.md) -- [corrMatrix](../reference/corrmatrix.md) -- [covarPop](../reference/covarpop.md) -- [covarStable](../reference/covarpopstable.md) -- [covarPopMatrix](../reference/covarpopmatrix.md) -- [covarSamp](../reference/covarsamp.md) -- [covarSampStable](../reference/covarsampstable.md) -- [covarSampMatrix](../reference/covarsampmatrix.md) -- [entropy](../reference/entropy.md) -- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) -- [intervalLengthSum](../reference/intervalLengthSum.md) -- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) -- [mannwhitneyutest](../reference/mannwhitneyutest.md) -- [median](../reference/median.md) -- [rankCorr](../reference/rankCorr.md) -- [sumKahan](../reference/sumkahan.md) -- [studentTTest](../reference/studentttest.md) -- [welchTTest](../reference/welchttest.md) - -ClickHouse-specific aggregate functions: +ClickHouse supports all standard SQL functions (sum, avg, min, max, count) and a wide range of aggregate functions for various applications: - [aggThrow](../reference/aggthrow.md) - [analysisOfVariance](../reference/analysis_of_variance.md) -- [any](../reference/any.md) - [anyHeavy](../reference/anyheavy.md) - [anyLast](../reference/anylast.md) -- [boundingRatio](../reference/boundrat.md) -- [first_value](../reference/first_value.md) -- [last_value](../reference/last_value.md) -- [argMin](../reference/argmin.md) +- [any](../reference/any.md) - [argMax](../reference/argmax.md) +- [argMin](../reference/argmin.md) - [avgWeighted](../reference/avgweighted.md) -- [topK](../reference/topk.md) -- [topKWeighted](../reference/topkweighted.md) -- [deltaSum](../reference/deltasum.md) +- [avg](../reference/avg.md) +- [boundingRatio](../reference/boundrat.md) +- [categoricalInformationValue](../reference/categoricalinformationvalue.md) +- [contingency](../reference/contingency.md) +- [corrMatrix](../reference/corrmatrix.md) +- [corr](../reference/corr.md) +- [corr](../reference/corrstable.md) +- [count](../reference/count.md) +- [covarPopMatrix](../reference/covarpopmatrix.md) +- [covarPop](../reference/covarpop.md) +- [covarSampMatrix](../reference/covarsampmatrix.md) +- [covarSampStable](../reference/covarsampstable.md) +- [covarSamp](../reference/covarsamp.md) +- [covarStable](../reference/covarpopstable.md) +- 
[cramersVBiasCorrected](../reference/cramersvbiascorrected.md) +- [cramersV](../reference/cramersv.md) - [deltaSumTimestamp](../reference/deltasumtimestamp.md) +- [deltaSum](../reference/deltasum.md) +- [entropy](../reference/entropy.md) +- [exponentialMovingAverage](../reference/exponentialmovingaverage.md) +- [first_value](../reference/first_value.md) - [flameGraph](../reference/flame_graph.md) -- [groupArray](../reference/grouparray.md) -- [groupArrayLast](../reference/grouparraylast.md) -- [groupUniqArray](../reference/groupuniqarray.md) - [groupArrayInsertAt](../reference/grouparrayinsertat.md) +- [groupArrayIntersect](../reference/grouparrayintersect.md) +- [groupArrayLast](../reference/grouparraylast.md) - [groupArrayMovingAvg](../reference/grouparraymovingavg.md) - [groupArrayMovingSum](../reference/grouparraymovingsum.md) - [groupArraySample](../reference/grouparraysample.md) - [groupArraySorted](../reference/grouparraysorted.md) -- [groupArrayIntersect](../reference/grouparrayintersect.md) +- [groupArray](../reference/grouparray.md) - [groupBitAnd](../reference/groupbitand.md) - [groupBitOr](../reference/groupbitor.md) - [groupBitXor](../reference/groupbitxor.md) -- [groupBitmap](../reference/groupbitmap.md) - [groupBitmapAnd](../reference/groupbitmapand.md) - [groupBitmapOr](../reference/groupbitmapor.md) - [groupBitmapXor](../reference/groupbitmapxor.md) -- [sumWithOverflow](../reference/sumwithoverflow.md) -- [sumMap](../reference/summap.md) -- [sumMapWithOverflow](../reference/summapwithoverflow.md) -- [sumMapFiltered](../parametric-functions.md/#summapfiltered) -- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) -- [minMap](../reference/minmap.md) -- [maxMap](../reference/maxmap.md) -- [skewSamp](../reference/skewsamp.md) -- [skewPop](../reference/skewpop.md) -- [kurtSamp](../reference/kurtsamp.md) +- [groupBitmap](../reference/groupbitmap.md) +- [groupUniqArray](../reference/groupuniqarray.md) +- [intervalLengthSum](../reference/intervalLengthSum.md) +- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) - [kurtPop](../reference/kurtpop.md) -- [uniq](../reference/uniq.md) -- [uniqExact](../reference/uniqexact.md) -- [uniqCombined](../reference/uniqcombined.md) -- [uniqCombined64](../reference/uniqcombined64.md) -- [uniqHLL12](../reference/uniqhll12.md) -- [uniqTheta](../reference/uniqthetasketch.md) -- [quantile](../reference/quantile.md) -- [quantiles](../reference/quantiles.md) -- [quantileExact](../reference/quantileexact.md) -- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) -- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) -- [quantileExactWeighted](../reference/quantileexactweighted.md) -- [quantileTiming](../reference/quantiletiming.md) -- [quantileTimingWeighted](../reference/quantiletimingweighted.md) -- [quantileDeterministic](../reference/quantiledeterministic.md) -- [quantileTDigest](../reference/quantiletdigest.md) -- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) -- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) -- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) -- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) -- [simpleLinearRegression](../reference/simplelinearregression.md) -- [singleValueOrNull](../reference/singlevalueornull.md) -- [stochasticLinearRegression](../reference/stochasticlinearregression.md) -- 
[stochasticLogisticRegression](../reference/stochasticlogisticregression.md) -- [categoricalInformationValue](../reference/categoricalinformationvalue.md) -- [contingency](../reference/contingency.md) -- [cramersV](../reference/cramersv.md) -- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md) -- [theilsU](../reference/theilsu.md) -- [maxIntersections](../reference/maxintersections.md) +- [kurtSamp](../reference/kurtsamp.md) +- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) +- [last_value](../reference/last_value.md) +- [mannwhitneyutest](../reference/mannwhitneyutest.md) - [maxIntersectionsPosition](../reference/maxintersectionsposition.md) +- [maxIntersections](../reference/maxintersections.md) +- [maxMap](../reference/maxmap.md) +- [max](../reference/max.md) - [meanZTest](../reference/meanztest.md) +- [median](../reference/median.md) +- [minMap](../reference/minmap.md) +- [min](../reference/min.md) +- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) +- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) +- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) +- [quantileDeterministic](../reference/quantiledeterministic.md) +- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) +- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) +- [quantileExactWeighted](../reference/quantileexactweighted.md) +- [quantileExact](../reference/quantileexact.md) - [quantileGK](../reference/quantileGK.md) - [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md) +- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) +- [quantileTDigest](../reference/quantiletdigest.md) +- [quantileTimingWeighted](../reference/quantiletimingweighted.md) +- [quantileTiming](../reference/quantiletiming.md) +- [quantile](../reference/quantile.md) +- [quantiles](../reference/quantiles.md) +- [rankCorr](../reference/rankCorr.md) +- [simpleLinearRegression](../reference/simplelinearregression.md) +- [singleValueOrNull](../reference/singlevalueornull.md) +- [skewPop](../reference/skewpop.md) +- [skewSamp](../reference/skewsamp.md) - [sparkBar](../reference/sparkbar.md) +- [stddevPopStable](../reference/stddevpopstable.md) +- [stddevPop](../reference/stddevpop.md) +- [stddevSampStable](../reference/stddevsampstable.md) +- [stddevSamp](../reference/stddevsamp.md) +- [stochasticLinearRegression](../reference/stochasticlinearregression.md) +- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) +- [studentTTest](../reference/studentttest.md) - [sumCount](../reference/sumcount.md) -- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) +- [sumKahan](../reference/sumkahan.md) +- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) +- [sumMapFiltered](../parametric-functions.md/#summapfiltered) +- [sumMapWithOverflow](../reference/summapwithoverflow.md) +- [sumMap](../reference/summap.md) +- [sumWithOverflow](../reference/sumwithoverflow.md) +- [sum](../reference/sum.md) +- [theilsU](../reference/theilsu.md) +- [topKWeighted](../reference/topkweighted.md) +- [topK](../reference/topk.md) +- [uniqCombined64](../reference/uniqcombined64.md) +- [uniqCombined](../reference/uniqcombined.md) +- [uniqExact](../reference/uniqexact.md) +- [uniqHLL12](../reference/uniqhll12.md) +- [uniqTheta](../reference/uniqthetasketch.md) +- [uniq](../reference/uniq.md) +- [varPop](../reference/varpop.md) +- 
[varSamp](../reference/varsamp.md) +- [welchTTest](../reference/welchttest.md) From 2b20c2d2f22f9a399f4f43f0920f6b0df978c1a9 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 11 Nov 2024 01:46:00 +0100 Subject: [PATCH 117/171] Fix a race --- src/Databases/DatabaseAtomic.cpp | 14 +++++++++----- src/Databases/DatabaseAtomic.h | 2 +- src/Databases/DatabaseOnDisk.cpp | 8 ++++++-- src/Databases/DatabaseOnDisk.h | 2 +- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 88727d0389e..bd077ccd7b5 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -19,6 +18,7 @@ #include #include + namespace fs = std::filesystem; namespace DB @@ -69,9 +69,13 @@ DatabaseAtomic::DatabaseAtomic(String name_, String metadata_path_, UUID uuid, C void DatabaseAtomic::createDirectories() { - if (database_atomic_directories_created.test_and_set()) - return; - DatabaseOnDisk::createDirectories(); + std::lock_guard lock(mutex); + createDirectoriesUnlocked(); +} + +void DatabaseAtomic::createDirectoriesUnlocked() +{ + DatabaseOnDisk::createDirectoriesUnlocked(); fs::create_directories(fs::path(getContext()->getPath()) / "metadata"); fs::create_directories(path_to_table_symlinks); tryCreateMetadataSymlink(); @@ -113,9 +117,9 @@ void DatabaseAtomic::drop(ContextPtr) void DatabaseAtomic::attachTable(ContextPtr /* context_ */, const String & name, const StoragePtr & table, const String & relative_table_path) { assert(relative_table_path != data_path && !relative_table_path.empty()); - createDirectories(); DetachedTables not_in_use; std::lock_guard lock(mutex); + createDirectoriesUnlocked(); not_in_use = cleanupDetachedTables(); auto table_id = table->getStorageID(); assertDetachedTableNotInUse(table_id.uuid); diff --git a/src/Databases/DatabaseAtomic.h b/src/Databases/DatabaseAtomic.h index 3d0b74e31a0..7e909128635 100644 --- a/src/Databases/DatabaseAtomic.h +++ b/src/Databases/DatabaseAtomic.h @@ -76,8 +76,8 @@ protected: using DetachedTables = std::unordered_map; [[nodiscard]] DetachedTables cleanupDetachedTables() TSA_REQUIRES(mutex); - std::atomic_flag database_atomic_directories_created = ATOMIC_FLAG_INIT; void createDirectories(); + void createDirectoriesUnlocked() TSA_REQUIRES(mutex); void tryCreateMetadataSymlink(); diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 2f4280fe485..93ecf9cf11c 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -185,8 +185,12 @@ DatabaseOnDisk::DatabaseOnDisk( void DatabaseOnDisk::createDirectories() { - if (directories_created.test_and_set()) - return; + std::lock_guard lock(mutex); + createDirectoriesUnlocked(); +} + +void DatabaseOnDisk::createDirectoriesUnlocked() +{ fs::create_directories(std::filesystem::path(getContext()->getPath()) / data_path); fs::create_directories(metadata_path); } diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 0c0ecf76a26..1e11d21cc87 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -99,8 +99,8 @@ protected: virtual void removeDetachedPermanentlyFlag(ContextPtr context, const String & table_name, const String & table_metadata_path, bool attach); virtual void setDetachedTableNotInUseForce(const UUID & /*uuid*/) {} - std::atomic_flag directories_created = ATOMIC_FLAG_INIT; void createDirectories(); + void 
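/* Illustrative annotation, not part of the patch: the Unlocked variant is
   annotated TSA_REQUIRES(mutex), i.e. the caller must already hold `mutex`
   (as attachTable() now does), while createDirectories() takes the lock
   itself; this replaces the racy std::atomic_flag fast path. */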
createDirectoriesUnlocked() TSA_REQUIRES(mutex);

 const String metadata_path;
 const String data_path;

From f57bf2ee15fe93377b858efec767627321a69887 Mon Sep 17 00:00:00 2001
From: Robert Schulze
Date: Mon, 11 Nov 2024 09:38:26 +0000
Subject: [PATCH 118/171] Fix trash in the docs, pt. II

---
 .../getting-started/example-datasets/tpch.md  |   2 +-
 .../aggregate-functions/reference/index.md    | 113 +-----------------
 .../data-types/aggregatefunction.md           |   4 +-
 docs/en/sql-reference/data-types/index.md     |  29 +----
 docs/en/sql-reference/data-types/json.md      |   2 +-
 .../data-types/simpleaggregatefunction.md     |   4 +-
 docs/en/sql-reference/functions/geo/index.md  |  68 +----------
 .../sql-reference/statements/create/index.md  |  14 +--
 docs/en/sql-reference/statements/index.md     |  25 +---
 9 files changed, 16 insertions(+), 245 deletions(-)

diff --git a/docs/en/getting-started/example-datasets/tpch.md b/docs/en/getting-started/example-datasets/tpch.md
index de2c425b402..3ea4bffec38 100644
--- a/docs/en/getting-started/example-datasets/tpch.md
+++ b/docs/en/getting-started/example-datasets/tpch.md
@@ -46,7 +46,7 @@ Detailed table sizes with scale factor 100:
 | orders   | 150.000.000 | 6.15 GB  |
 | lineitem | 600.000.000 | 26.69 GB |

-(The table sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions.
+(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and based on below table definitions.)

 Now create tables in ClickHouse.

diff --git a/docs/en/sql-reference/aggregate-functions/reference/index.md b/docs/en/sql-reference/aggregate-functions/reference/index.md
index 2dce0afe2e1..ee8f0d5882e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/index.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/index.md
@@ -7,115 +7,4 @@ toc_hidden: true

 # List of Aggregate Functions

-ClickHouse supports all standard SQL functions (sum, avg, min, max, count) and a wide range of aggregate functions for various applications:
-
-- [aggThrow](../reference/aggthrow.md)
-- [analysisOfVariance](../reference/analysis_of_variance.md)
-- [anyHeavy](../reference/anyheavy.md)
-- [anyLast](../reference/anylast.md)
-- [any](../reference/any.md)
-- [argMax](../reference/argmax.md)
-- [argMin](../reference/argmin.md)
-- [avgWeighted](../reference/avgweighted.md)
-- [avg](../reference/avg.md)
-- [boundingRatio](../reference/boundrat.md)
-- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
-- [contingency](../reference/contingency.md)
-- [corrMatrix](../reference/corrmatrix.md)
-- [corr](../reference/corr.md)
-- [corr](../reference/corrstable.md)
-- [count](../reference/count.md)
-- [covarPopMatrix](../reference/covarpopmatrix.md)
-- [covarPop](../reference/covarpop.md)
-- [covarSampMatrix](../reference/covarsampmatrix.md)
-- [covarSampStable](../reference/covarsampstable.md)
-- [covarSamp](../reference/covarsamp.md)
-- [covarStable](../reference/covarpopstable.md)
-- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
-- [cramersV](../reference/cramersv.md)
-- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
-- [deltaSum](../reference/deltasum.md)
-- [entropy](../reference/entropy.md)
-- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
-- [first_value](../reference/first_value.md)
-- [flameGraph](../reference/flame_graph.md)
-- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
-- [groupArrayIntersect](../reference/grouparrayintersect.md)
-- [groupArrayLast](../reference/grouparraylast.md)
-- 
[groupArrayMovingAvg](../reference/grouparraymovingavg.md) -- [groupArrayMovingSum](../reference/grouparraymovingsum.md) -- [groupArraySample](../reference/grouparraysample.md) -- [groupArraySorted](../reference/grouparraysorted.md) -- [groupArray](../reference/grouparray.md) -- [groupBitAnd](../reference/groupbitand.md) -- [groupBitOr](../reference/groupbitor.md) -- [groupBitXor](../reference/groupbitxor.md) -- [groupBitmapAnd](../reference/groupbitmapand.md) -- [groupBitmapOr](../reference/groupbitmapor.md) -- [groupBitmapXor](../reference/groupbitmapxor.md) -- [groupBitmap](../reference/groupbitmap.md) -- [groupUniqArray](../reference/groupuniqarray.md) -- [intervalLengthSum](../reference/intervalLengthSum.md) -- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md) -- [kurtPop](../reference/kurtpop.md) -- [kurtSamp](../reference/kurtsamp.md) -- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md) -- [last_value](../reference/last_value.md) -- [mannwhitneyutest](../reference/mannwhitneyutest.md) -- [maxIntersectionsPosition](../reference/maxintersectionsposition.md) -- [maxIntersections](../reference/maxintersections.md) -- [maxMap](../reference/maxmap.md) -- [max](../reference/max.md) -- [meanZTest](../reference/meanztest.md) -- [median](../reference/median.md) -- [minMap](../reference/minmap.md) -- [min](../reference/min.md) -- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted) -- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16) -- [quantileDD](../reference/quantileddsketch.md#quantileddsketch) -- [quantileDeterministic](../reference/quantiledeterministic.md) -- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh) -- [quantileExactLow](../reference/quantileexact.md#quantileexactlow) -- [quantileExactWeighted](../reference/quantileexactweighted.md) -- [quantileExact](../reference/quantileexact.md) -- [quantileGK](../reference/quantileGK.md) -- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md) -- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md) -- [quantileTDigest](../reference/quantiletdigest.md) -- [quantileTimingWeighted](../reference/quantiletimingweighted.md) -- [quantileTiming](../reference/quantiletiming.md) -- [quantile](../reference/quantile.md) -- [quantiles](../reference/quantiles.md) -- [rankCorr](../reference/rankCorr.md) -- [simpleLinearRegression](../reference/simplelinearregression.md) -- [singleValueOrNull](../reference/singlevalueornull.md) -- [skewPop](../reference/skewpop.md) -- [skewSamp](../reference/skewsamp.md) -- [sparkBar](../reference/sparkbar.md) -- [stddevPopStable](../reference/stddevpopstable.md) -- [stddevPop](../reference/stddevpop.md) -- [stddevSampStable](../reference/stddevsampstable.md) -- [stddevSamp](../reference/stddevsamp.md) -- [stochasticLinearRegression](../reference/stochasticlinearregression.md) -- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md) -- [studentTTest](../reference/studentttest.md) -- [sumCount](../reference/sumcount.md) -- [sumKahan](../reference/sumkahan.md) -- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow) -- [sumMapFiltered](../parametric-functions.md/#summapfiltered) -- [sumMapWithOverflow](../reference/summapwithoverflow.md) -- [sumMap](../reference/summap.md) -- [sumWithOverflow](../reference/sumwithoverflow.md) -- [sum](../reference/sum.md) -- [theilsU](../reference/theilsu.md) -- 
[topKWeighted](../reference/topkweighted.md)
-- [topK](../reference/topk.md)
-- [uniqCombined64](../reference/uniqcombined64.md)
-- [uniqCombined](../reference/uniqcombined.md)
-- [uniqExact](../reference/uniqexact.md)
-- [uniqHLL12](../reference/uniqhll12.md)
-- [uniqTheta](../reference/uniqthetasketch.md)
-- [uniq](../reference/uniq.md)
-- [varPop](../reference/varpop.md)
-- [varSamp](../reference/varsamp.md)
-- [welchTTest](../reference/welchttest.md)
+ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.

diff --git a/docs/en/sql-reference/data-types/aggregatefunction.md b/docs/en/sql-reference/data-types/aggregatefunction.md
index 37f0d0e50ae..4cad27db68b 100644
--- a/docs/en/sql-reference/data-types/aggregatefunction.md
+++ b/docs/en/sql-reference/data-types/aggregatefunction.md
@@ -6,7 +6,9 @@ sidebar_label: AggregateFunction

 # AggregateFunction

-Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge`suffix.
+Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
+The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
+To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.

 `AggregateFunction(name, types_of_arguments...)` — parametric data type.

diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md
index 2b89dd145e6..134678f71bb 100644
--- a/docs/en/sql-reference/data-types/index.md
+++ b/docs/en/sql-reference/data-types/index.md
@@ -6,29 +6,8 @@ sidebar_position: 1

 # Data Types in ClickHouse

-ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
+This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).

-:::note
-You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
-:: - -ClickHouse data types include: - -- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`) -- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md) -- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md) -- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md) -- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time -- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated) -- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column -- **UUID**: a performant option for storing [`UUID` values](./uuid.md) -- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column -- **Arrays**: any column can be defined as an [`Array` of values](./array.md) -- **Maps**: use [`Map`](./map.md) for storing key/value pairs -- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results -- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell -- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. -- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type) -- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses -- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` -- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) +System table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an +overview of all available data types. +It also shows whether a data type is an alias to another data type and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`). diff --git a/docs/en/sql-reference/data-types/json.md b/docs/en/sql-reference/data-types/json.md index e48b308a620..ce69f15f0fa 100644 --- a/docs/en/sql-reference/data-types/json.md +++ b/docs/en/sql-reference/data-types/json.md @@ -7,7 +7,7 @@ keywords: [object, data type] # Object Data Type (deprecated) -**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864). +**This feature is not production-ready and is deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
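For illustration, the `-State`/`-Merge` round trip described in the aggregatefunction.md hunk above might look like the following sketch. The `events` source table (with `ts DateTime` and `user_id UInt64` columns) and the `agg_states` table are invented names for the example, not part of the docs change:

```sql
-- Store intermediate uniq() states produced with the -State combinator.
CREATE TABLE agg_states
(
    hour DateTime,
    ids AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY hour;

INSERT INTO agg_states
SELECT toStartOfHour(ts) AS hour, uniqState(user_id)
FROM events
GROUP BY hour;

-- Finalize the stored states later with the matching -Merge combinator.
SELECT hour, uniqMerge(ids) AS unique_users
FROM agg_states
GROUP BY hour;
```

A materialized view with an `AggregateFunction` target column relies on the same pair of combinators.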
diff --git a/docs/en/sql-reference/data-types/simpleaggregatefunction.md b/docs/en/sql-reference/data-types/simpleaggregatefunction.md index 4fb74ac30e4..8edd8b5b8ff 100644 --- a/docs/en/sql-reference/data-types/simpleaggregatefunction.md +++ b/docs/en/sql-reference/data-types/simpleaggregatefunction.md @@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction --- # SimpleAggregateFunction -`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. +`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. +This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. +This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data. The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix. 
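The merge property stated in the simpleaggregatefunction.md hunk above can be sketched as follows; the `simple_agg` table is an invented example, and `max` is used because it clearly satisfies `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`:

```sql
CREATE TABLE simple_agg
(
    key UInt64,
    max_value SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY key;

-- Plain values are inserted directly; background merges keep only the maximum per key.
INSERT INTO simple_agg VALUES (1, 10), (2, 5);
INSERT INTO simple_agg VALUES (1, 20);

-- Aggregate at query time for a deterministic result before parts are merged.
SELECT key, max(max_value) AS current_max
FROM simple_agg
GROUP BY key
ORDER BY key;
```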
diff --git a/docs/en/sql-reference/functions/geo/index.md b/docs/en/sql-reference/functions/geo/index.md index d46e60281e2..51b6868611a 100644 --- a/docs/en/sql-reference/functions/geo/index.md +++ b/docs/en/sql-reference/functions/geo/index.md @@ -5,70 +5,4 @@ sidebar_position: 62 title: "Geo Functions" --- - -## Geographical Coordinates Functions - -- [greatCircleDistance](./coordinates.md#greatcircledistance) -- [geoDistance](./coordinates.md#geodistance) -- [greatCircleAngle](./coordinates.md#greatcircleangle) -- [pointInEllipses](./coordinates.md#pointinellipses) -- [pointInPolygon](./coordinates.md#pointinpolygon) - -## Geohash Functions -- [geohashEncode](./geohash.md#geohashencode) -- [geohashDecode](./geohash.md#geohashdecode) -- [geohashesInBox](./geohash.md#geohashesinbox) - -## H3 Indexes Functions - -- [h3IsValid](./h3.md#h3isvalid) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3EdgeAngle](./h3.md#h3edgeangle) -- [h3EdgeLengthM](./h3.md#h3edgelengthm) -- [h3EdgeLengthKm](./h3.md#h3edgelengthkm) -- [geoToH3](./h3.md#geotoh3) -- [h3ToGeo](./h3.md#h3togeo) -- [h3ToGeoBoundary](./h3.md#h3togeoboundary) -- [h3kRing](./h3.md#h3kring) -- [h3GetBaseCell](./h3.md#h3getbasecell) -- [h3HexAreaM2](./h3.md#h3hexaream2) -- [h3HexAreaKm2](./h3.md#h3hexareakm2) -- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors) -- [h3ToChildren](./h3.md#h3tochildren) -- [h3ToParent](./h3.md#h3toparent) -- [h3ToString](./h3.md#h3tostring) -- [stringToH3](./h3.md#stringtoh3) -- [h3GetResolution](./h3.md#h3getresolution) -- [h3IsResClassIII](./h3.md#h3isresclassiii) -- [h3IsPentagon](./h3.md#h3ispentagon) -- [h3GetFaces](./h3.md#h3getfaces) -- [h3CellAreaM2](./h3.md#h3cellaream2) -- [h3CellAreaRads2](./h3.md#h3cellarearads2) -- [h3ToCenterChild](./h3.md#h3tocenterchild) -- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm) -- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm) -- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads) -- [h3NumHexagons](./h3.md#h3numhexagons) -- [h3Line](./h3.md#h3line) -- [h3Distance](./h3.md#h3distance) -- [h3HexRing](./h3.md#h3hexring) -- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge) -- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid) -- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge) -- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge) -- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge) -- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon) -- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary) - -## S2 Index Functions - -- [geoToS2](./s2.md#geotos2) -- [s2ToGeo](./s2.md#s2togeo) -- [s2GetNeighbors](./s2.md#s2getneighbors) -- [s2CellsIntersect](./s2.md#s2cellsintersect) -- [s2CapContains](./s2.md#s2capcontains) -- [s2CapUnion](./s2.md#s2capunion) -- [s2RectAdd](./s2.md#s2rectadd) -- [s2RectContains](./s2.md#s2rectcontains) -- [s2RectUnion](./s2.md#s2rectunion) -- [s2RectIntersection](./s2.md#s2rectintersection) +Functions for working with geometric objects, for example [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [h3 indexes](./h3.md). 
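Since the rewritten geo index page now only links to the per-family reference pages, a couple of the linked functions in action may help orient the reader; the coordinates below are arbitrary sample points, and both functions take longitude before latitude:

```sql
-- Great-circle distance in meters between two (lon, lat) points.
SELECT greatCircleDistance(-56.73, 37.06, 38.58, 55.75) AS distance_m;

-- Geohash string for a point at an explicit precision.
SELECT geohashEncode(-5.60302734375, 42.593994140625, 12) AS geohash;
```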
diff --git a/docs/en/sql-reference/statements/create/index.md b/docs/en/sql-reference/statements/create/index.md index fa39526a53e..5854d7cf9d2 100644 --- a/docs/en/sql-reference/statements/create/index.md +++ b/docs/en/sql-reference/statements/create/index.md @@ -6,16 +6,4 @@ sidebar_label: CREATE # CREATE Queries -Create queries make a new entity of one of the following kinds: - -- [DATABASE](/docs/en/sql-reference/statements/create/database.md) -- [TABLE](/docs/en/sql-reference/statements/create/table.md) -- [VIEW](/docs/en/sql-reference/statements/create/view.md) -- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md) -- [FUNCTION](/docs/en/sql-reference/statements/create/function.md) -- [USER](/docs/en/sql-reference/statements/create/user.md) -- [ROLE](/docs/en/sql-reference/statements/create/role.md) -- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md) -- [QUOTA](/docs/en/sql-reference/statements/create/quota.md) -- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md) -- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md) +CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md). diff --git a/docs/en/sql-reference/statements/index.md b/docs/en/sql-reference/statements/index.md index 5aa61cf8d21..f288b30b27b 100644 --- a/docs/en/sql-reference/statements/index.md +++ b/docs/en/sql-reference/statements/index.md @@ -6,27 +6,4 @@ sidebar_label: List of statements # ClickHouse SQL Statements -Statements represent various kinds of action you can perform using SQL queries. Each kind of statement has it’s own syntax and usage details that are described separately: - -- [SELECT](/docs/en/sql-reference/statements/select/index.md) -- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md) -- [CREATE](/docs/en/sql-reference/statements/create/index.md) -- [ALTER](/docs/en/sql-reference/statements/alter/index.md) -- [SYSTEM](/docs/en/sql-reference/statements/system.md) -- [SHOW](/docs/en/sql-reference/statements/show.md) -- [GRANT](/docs/en/sql-reference/statements/grant.md) -- [REVOKE](/docs/en/sql-reference/statements/revoke.md) -- [ATTACH](/docs/en/sql-reference/statements/attach.md) -- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md) -- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md) -- [DETACH](/docs/en/sql-reference/statements/detach.md) -- [DROP](/docs/en/sql-reference/statements/drop.md) -- [EXISTS](/docs/en/sql-reference/statements/exists.md) -- [KILL](/docs/en/sql-reference/statements/kill.md) -- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md) -- [RENAME](/docs/en/sql-reference/statements/rename.md) -- [SET](/docs/en/sql-reference/statements/set.md) -- [SET ROLE](/docs/en/sql-reference/statements/set-role.md) -- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md) -- [USE](/docs/en/sql-reference/statements/use.md) -- [EXPLAIN](/docs/en/sql-reference/statements/explain.md) +Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md). 
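A minimal sketch of the entity kinds the rewritten CREATE page links to; the `analytics` database and its objects are invented names for the example:

```sql
CREATE DATABASE IF NOT EXISTS analytics;

CREATE TABLE analytics.visits
(
    visit_date Date,
    user_id UInt64
)
ENGINE = MergeTree
ORDER BY (visit_date, user_id);

CREATE VIEW analytics.daily_visits AS
SELECT visit_date, count() AS visits
FROM analytics.visits
GROUP BY visit_date;
```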
From 5aa9e64070cda74b65fa6cb639e2c83cd1abee67 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 11 Nov 2024 10:11:23 +0000 Subject: [PATCH 119/171] Fix spelling --- utils/check-style/aspell-ignore/en/aspell-dict.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt index a58b5e9ff58..a0d4d1d349e 100644 --- a/utils/check-style/aspell-ignore/en/aspell-dict.txt +++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt @@ -1811,6 +1811,7 @@ geocode geohash geohashDecode geohashEncode +geohashes geohashesInBox geoip geospatial From c4946cf1594c6083c01a39954495aea1be01f574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mi=D1=81hael=20Stetsyuk?= <59827607+mstetsyuk@users.noreply.github.com> Date: Mon, 4 Nov 2024 10:41:26 +0000 Subject: [PATCH 120/171] style fix --- src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index c73c9f6d048..addaeb65350 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -29,7 +29,6 @@ namespace MergeTreeSetting namespace ErrorCodes { extern const int REPLICA_IS_ALREADY_ACTIVE; - extern const int REPLICA_STATUS_CHANGED; extern const int LOGICAL_ERROR; extern const int SUPPORT_IS_DISABLED; } From 05dfc6dbdba48964cfd147a3635613966da78f0a Mon Sep 17 00:00:00 2001 From: kssenii Date: Mon, 11 Nov 2024 11:53:24 +0100 Subject: [PATCH 121/171] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index c6223bef2b2..7eb8455a169 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -76,6 +76,7 @@ static std::initializer_list Date: Mon, 11 Nov 2024 13:26:31 +0200 Subject: [PATCH 122/171] Fix typo Fix log message for clearer understanding --- docker/server/entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/server/entrypoint.sh b/docker/server/entrypoint.sh index 2f87008f2e5..947244dd97f 100755 --- a/docker/server/entrypoint.sh +++ b/docker/server/entrypoint.sh @@ -162,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then tries=${CLICKHOUSE_INIT_TIMEOUT:-1000} while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do if [ "$tries" -le "0" ]; then - echo >&2 'ClickHouse init process failed.' + echo >&2 'ClickHouse init process timeout.'
exit 1 fi tries=$(( tries-1 )) From 33f9e8bc2e5540386e5ccf7fec591eaa1bf5cc24 Mon Sep 17 00:00:00 2001 From: nauu Date: Mon, 11 Nov 2024 20:25:57 +0800 Subject: [PATCH 123/171] fix error --- src/IO/S3/URI.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/IO/S3/URI.cpp b/src/IO/S3/URI.cpp index ad746ff3326..aefe3ff338c 100644 --- a/src/IO/S3/URI.cpp +++ b/src/IO/S3/URI.cpp @@ -117,7 +117,7 @@ URI::URI(const std::string & uri_, bool allow_archive_path_syntax) is_virtual_hosted_style = true; if (name == "oss-data-acc") { - bucket = bucket.substr(0, bucket.find(".")); + bucket = bucket.substr(0, bucket.find('.')); endpoint = uri.getScheme() + "://" + uri.getHost().substr(bucket.length() + 1); } else From 5f0f2628b15988fda3467b50e21be9a149fc9eb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Mon, 11 Nov 2024 13:46:50 +0100 Subject: [PATCH 124/171] Avoid failures on fault injection --- tests/docker_scripts/attach_gdb.lib | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/docker_scripts/attach_gdb.lib b/tests/docker_scripts/attach_gdb.lib index 4170a19176c..f8a08b5e39d 100644 --- a/tests/docker_scripts/attach_gdb.lib +++ b/tests/docker_scripts/attach_gdb.lib @@ -5,7 +5,8 @@ source /repo/tests/docker_scripts/utils.lib function attach_gdb_to_clickhouse() { - IS_ASAN=$(clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") + # Use retries to avoid failures due to fault injections + IS_ASAN=$(run_with_retry 5 clickhouse-client --query "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)") if [[ "$IS_ASAN" = "1" ]]; then echo "ASAN build detected. Not using gdb since it disables LeakSanitizer detections" From 17f7097d5b66129a3f72f98114cd28575ed839dc Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 11 Nov 2024 13:28:52 +0000 Subject: [PATCH 125/171] Fix CAST from LowCardinality(Nullable) to Dynamic --- src/Functions/FunctionsConversion.cpp | 2 +- ...3261_low_cardinality_nullable_to_dynamic_cast.reference | 2 ++ .../03261_low_cardinality_nullable_to_dynamic_cast.sql | 7 +++++++ 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference create mode 100644 tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0f6311c9716..5f1583f6e71 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -4390,7 +4390,7 @@ private: variant_column = IColumn::mutate(column); /// Otherwise we should filter column. 
else - variant_column = column->filter(filter, variant_size_hint)->assumeMutable(); + variant_column = IColumn::mutate(column->filter(filter, variant_size_hint)); assert_cast(*variant_column).nestedRemoveNullable(); return createVariantFromDescriptorsAndOneNonEmptyVariant(variant_types, std::move(discriminators), std::move(variant_column), variant_discr); diff --git a/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference new file mode 100644 index 00000000000..96e34d5a44c --- /dev/null +++ b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.reference @@ -0,0 +1,2 @@ +\N +\N diff --git a/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql new file mode 100644 index 00000000000..fdb497a62bf --- /dev/null +++ b/tests/queries/0_stateless/03261_low_cardinality_nullable_to_dynamic_cast.sql @@ -0,0 +1,7 @@ +SET allow_suspicious_low_cardinality_types = 1, allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 LowCardinality(Nullable(Int))) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (NULL); +SELECT c0::Dynamic FROM t0; +SELECT c0 FROM t0; +DROP TABLE t0; From 288756bc9aede92c6d005af34be94973a5d78203 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 13:32:01 +0000 Subject: [PATCH 126/171] Fix for stateful functions. --- .../QueryPlan/BuildQueryPipelineSettings.cpp | 13 ++++++++++++- .../QueryPlan/BuildQueryPipelineSettings.h | 2 ++ src/Processors/QueryPlan/FilterStep.cpp | 12 ++++++++++-- .../queries/0_stateless/03199_merge_filters_bug.sql | 2 +- 4 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp index fb3ed7f80fc..ce02ef8b9ba 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp @@ -6,12 +6,23 @@ namespace DB { +namespace Setting +{ + extern const SettingsBool query_plan_merge_filters; +} + BuildQueryPipelineSettings BuildQueryPipelineSettings::fromContext(ContextPtr from) { + const auto & query_settings = from->getSettingsRef(); BuildQueryPipelineSettings settings; - settings.actions_settings = ExpressionActionsSettings::fromSettings(from->getSettingsRef(), CompileExpressions::yes); + settings.actions_settings = ExpressionActionsSettings::fromSettings(query_settings, CompileExpressions::yes); settings.process_list_element = from->getProcessListElement(); settings.progress_callback = from->getProgressCallback(); + + /// Setting query_plan_merge_filters is enabled by default. + /// But it can brake short-circuit without splitting fiter step into smaller steps. + /// So, enable and disable this optimizations together. 
+    settings.enable_multiple_filters_transforms_for_and_chain = query_settings[Setting::query_plan_merge_filters]; return settings; } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index d99f9a7d1f1..6219e37db58 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -17,6 +17,8 @@ using TemporaryFileLookupPtr = std::shared_ptr; struct BuildQueryPipelineSettings { + bool enable_multiple_filters_transforms_for_and_chain = true; + ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 3d56a2352dc..a6b157cdd1d 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -139,7 +139,11 @@ FilterStep::FilterStep( void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings) { - auto and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + std::vector and_atoms; + + if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); + for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag), settings.getActionsSettings()); @@ -178,7 +182,11 @@ void FilterStep::describeActions(FormatSettings & settings) const String prefix(settings.offset, settings.indent_char); auto cloned_dag = actions_dag.clone(); - auto and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + + std::vector and_atoms; + if (!actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + for (auto & and_atom : and_atoms) { auto expression = std::make_shared(std::move(and_atom.dag)); diff --git a/tests/queries/0_stateless/03199_merge_filters_bug.sql b/tests/queries/0_stateless/03199_merge_filters_bug.sql index ed2ec2ea217..bb2a4255a3d 100644 --- a/tests/queries/0_stateless/03199_merge_filters_bug.sql +++ b/tests/queries/0_stateless/03199_merge_filters_bug.sql @@ -49,7 +49,7 @@ tmp1 AS fs1 FROM t2 LEFT JOIN tmp1 USING (fs1) - WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0; + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; optimize table t1 final; From 8c2e541392e552343431a6b9b411ee55f37e8fe8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 14:27:48 +0000 Subject: [PATCH 127/171] Avoid using names in multistage prewhere optimization.
--- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 110 ++++++++++-------- 1 file changed, 60 insertions(+), 50 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 9c82817e8cb..73fe2600946 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include @@ -57,9 +58,9 @@ struct DAGNodeRef const ActionsDAG::Node * node; }; -/// Result name -> DAGNodeRef -using OriginalToNewNodeMap = std::unordered_map; -using NodeNameToLastUsedStepMap = std::unordered_map; +/// Result -> DAGNodeRef +using OriginalToNewNodeMap = std::unordered_map; +using NodeNameToLastUsedStepMap = std::unordered_map; /// Clones the part of original DAG responsible for computing the original_dag_node and adds it to the new DAG. const ActionsDAG::Node & addClonedDAGToDAG( @@ -69,12 +70,12 @@ const ActionsDAG::Node & addClonedDAGToDAG( OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { - const String & node_name = original_dag_node->result_name; + //const String & node_name = original_dag_node->result_name; /// Look for the node in the map of already known nodes - if (node_remap.contains(node_name)) + if (node_remap.contains(original_dag_node)) { /// If the node is already in the new DAG, return it - const auto & node_ref = node_remap.at(node_name); + const auto & node_ref = node_remap.at(original_dag_node); if (node_ref.dag == new_dag.get()) return *node_ref.node; @@ -83,11 +84,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( { node_ref.dag->addOrReplaceInOutputs(*node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[node_name] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. 
- node_to_step_map[node_name] = step; + node_to_step_map[original_dag_node] = step; return new_node; } } @@ -96,7 +97,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( if (original_dag_node->type == ActionsDAG::ActionType::INPUT) { const auto & new_node = new_dag->addInput(original_dag_node->result_name, original_dag_node->result_type); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -105,7 +106,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & new_node = new_dag->addColumn( ColumnWithTypeAndName(original_dag_node->column, original_dag_node->result_type, original_dag_node->result_name)); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -113,7 +114,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( { const auto & alias_child = addClonedDAGToDAG(step, original_dag_node->children[0], new_dag, node_remap, node_to_step_map); const auto & new_node = new_dag->addAlias(alias_child, original_dag_node->result_name); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -128,7 +129,7 @@ const ActionsDAG::Node & addClonedDAGToDAG( } const auto & new_node = new_dag->addFunction(original_dag_node->function_base, new_children, original_dag_node->result_name); - node_remap[node_name] = {new_dag.get(), &new_node}; + node_remap[original_dag_node] = {new_dag.get(), &new_node}; return new_node; } @@ -138,11 +139,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( const ActionsDAG::Node & addFunction( const ActionsDAGPtr & new_dag, const FunctionOverloadResolverPtr & function, - ActionsDAG::NodeRawConstPtrs children, - OriginalToNewNodeMap & node_remap) + ActionsDAG::NodeRawConstPtrs children) + //OriginalToNewNodeMap & node_remap) { const auto & new_node = new_dag->addFunction(function, children, ""); - node_remap[new_node.result_name] = {new_dag.get(), &new_node}; + //node_remap[new_node.result_name] = {new_dag.get(), &new_node}; return new_node; } @@ -152,14 +153,14 @@ const ActionsDAG::Node & addFunction( const ActionsDAG::Node & addCast( const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, - const DataTypePtr & to_type, - OriginalToNewNodeMap & node_remap) + const DataTypePtr & to_type) + //[[maybe_unused]] OriginalToNewNodeMap & node_remap) { if (!node_to_cast.result_type->equals(*to_type)) return node_to_cast; const auto & new_node = dag->addCast(node_to_cast, to_type, {}); - node_remap[new_node.result_name] = {dag.get(), &new_node}; + //node_remap[new_node.result_name] = {dag.get(), &new_node}; return new_node; } @@ -169,8 +170,8 @@ const ActionsDAG::Node & addCast( /// 2. makes sure that the result contains only 0 or 1 values even if the source column contains non-boolean values. 
const ActionsDAG::Node & addAndTrue( const ActionsDAGPtr & dag, - const ActionsDAG::Node & filter_node_to_normalize, - OriginalToNewNodeMap & node_remap) + const ActionsDAG::Node & filter_node_to_normalize) + //OriginalToNewNodeMap & node_remap) { Field const_true_value(true); @@ -181,7 +182,7 @@ const ActionsDAG::Node & addAndTrue( const auto * const_true_node = &dag->addColumn(std::move(const_true_column)); ActionsDAG::NodeRawConstPtrs children = {&filter_node_to_normalize, const_true_node}; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - return addFunction(dag, func_builder_and, children, node_remap); + return addFunction(dag, func_builder_and, children); //, node_remap); } } @@ -243,7 +244,11 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction struct Step { ActionsDAGPtr actions; - String column_name; + /// Original condition, in case if we have only one condition, and it was not casted + const ActionsDAG::Node * original_node; + /// Result condition node + const ActionsDAG::Node * result_node; + //String column_name; }; std::vector steps; @@ -254,7 +259,9 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { const auto & condition_group = condition_groups[step_index]; ActionsDAGPtr step_dag = std::make_unique(); - String result_name; + const ActionsDAG::Node * original_node = nullptr; + const ActionsDAG::Node * result_node; + //String result_name; std::vector new_condition_nodes; for (const auto * node : condition_group) @@ -267,48 +274,47 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { /// Add AND function to combine the conditions FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes, node_remap); - step_dag->addOrReplaceInOutputs(and_function_node); - result_name = and_function_node.result_name; + const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); //, node_remap); + //step_dag->addOrReplaceInOutputs(and_function_node); + result_node = &and_function_node; } else { - const auto & result_node = *new_condition_nodes.front(); + result_node = new_condition_nodes.front(); /// Check if explicit cast is needed for the condition to serve as a filter. - const auto result_type_name = result_node.result_type->getName(); - if (result_type_name == "UInt8" || - result_type_name == "Nullable(UInt8)" || - result_type_name == "LowCardinality(UInt8)" || - result_type_name == "LowCardinality(Nullable(UInt8))") + //const auto result_type_name = result_node->result_type->getName(); + if (isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) { /// No need to cast - step_dag->addOrReplaceInOutputs(result_node); - result_name = result_node.result_name; + //step_dag->addOrReplaceInOutputs(result_node); + //result_name = result_node.result_name; } else { /// Build "condition AND True" expression to "cast" the condition to UInt8 or Nullable(UInt8) depending on its type. 
- const auto & cast_node = addAndTrue(step_dag, result_node, node_remap); - step_dag->addOrReplaceInOutputs(cast_node); - result_name = cast_node.result_name; + result_node = &addAndTrue(step_dag, *result_node); //, node_remap); + //step_dag->addOrReplaceInOutputs(cast_node); + //result_name = &cast_node.result_name; } } - steps.push_back({std::move(step_dag), result_name}); + step_dag->getOutputs().insert(step_dag->getOutputs().begin(), result_node); + steps.push_back({std::move(step_dag), original_node, result_node}); } /// 6. Find all outputs of the original DAG auto original_outputs = prewhere_info->prewhere_actions.getOutputs(); + steps.back().actions->getOutputs().clear(); /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 - NameSet all_output_names; + std::unordered_set all_outputs; for (const auto * output : original_outputs) { - all_output_names.insert(output->result_name); - if (node_remap.contains(output->result_name)) + all_outputs.insert(output); + if (node_remap.contains(output)) //->result_name)) { - const auto & new_node_info = node_remap[output->result_name]; - new_node_info.dag->addOrReplaceInOutputs(*new_node_info.node); + const auto & new_node_info = node_remap[output]; + new_node_info.dag->getOutputs().push_back(new_node_info.node); } else if (output->result_name == prewhere_info->prewhere_column_name) { @@ -319,20 +325,23 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// 1. AND the last condition with constant True. This is needed to make sure that in the last step filter has UInt8 type /// but contains values other than 0 and 1 (e.g. if it is (number%5) it contains 2,3,4) /// 2. 
CAST the result to the exact type of the PREWHERE column from the original DAG - const auto & last_step_result_node_info = node_remap[steps.back().column_name]; + //const auto & last_step_result_node_info = node_remap[steps.back().column_name]; auto & last_step_dag = steps.back().actions; + auto & last_step_result_node = steps.back().result_node; /// Build AND(last_step_result_node, true) - const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node_info.node, node_remap); + const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node); //, node_remap); /// Build CAST(and_node, type of PREWHERE column) - const auto & cast_node = addCast(last_step_dag, and_node, output->result_type, node_remap); + const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); //, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); - last_step_dag->addOrReplaceInOutputs(prewhere_result_node); + //last_step_dag->addOrReplaceInOutputs(prewhere_result_node); + last_step_dag->getOutputs().push_back(&prewhere_result_node); + steps.back().result_node = &prewhere_result_node; } else { const auto & node_in_new_dag = addClonedDAGToDAG(steps.size() - 1, output, steps.back().actions, node_remap, node_to_step); - steps.back().actions->addOrReplaceInOutputs(node_in_new_dag); + steps.back().actions->getOutputs().push_back(&node_in_new_dag); } } @@ -345,10 +354,10 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction { .type = PrewhereExprStep::Filter, .actions = std::make_shared(std::move(*step.actions), actions_settings), - .filter_column_name = step.column_name, + .filter_column_name = step.result_node->result_name, /// Don't remove if it's in the list of original outputs .remove_filter_column = - !all_output_names.contains(step.column_name) && node_to_step[step.column_name] <= step_index, + step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, .need_filter = false, .perform_alter_conversions = true, }; @@ -356,6 +365,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction prewhere.steps.push_back(std::make_shared(std::move(new_step))); } + prewhere.steps.back()->remove_filter_column = prewhere_info->remove_prewhere_column; prewhere.steps.back()->need_filter = prewhere_info->need_filter; } From bcab2d51aa47f66d88ecc1c17463e2754260826d Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Mon, 11 Nov 2024 15:58:06 +0100 Subject: [PATCH 128/171] Use get_parameter_from_ssm in ci_buddy --- tests/ci/ci_buddy.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/tests/ci/ci_buddy.py b/tests/ci/ci_buddy.py index 164af72f4be..07b748180cd 100644 --- a/tests/ci/ci_buddy.py +++ b/tests/ci/ci_buddy.py @@ -3,14 +3,13 @@ import json import os from typing import Dict, List, Union -import boto3 import requests from botocore.exceptions import ClientError from ci_config import CI from ci_utils import WithIter from commit_status_helper import get_commit_filtered_statuses, get_repo -from get_robot_token import get_best_robot_token +from get_robot_token import get_best_robot_token, get_parameter_from_ssm from github_helper import GitHub from pr_info import PRInfo @@ -89,15 +88,9 @@ class CIBuddy: def _get_webhooks(): name = "ci_buddy_web_hooks" - session = boto3.Session(region_name="us-east-1") # Replace with your region - ssm_client = session.client("ssm") json_string = None try: - response = ssm_client.get_parameter( - Name=name, - WithDecryption=True, # Set to True if the parameter is a SecureString - ) - json_string = response["Parameter"]["Value"] + json_string = get_parameter_from_ssm(name, decrypt=True) except ClientError as e: print(f"An error occurred: {e}") From 5c5016218b77cf77323c126bc064d90601beadef Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 15:05:53 +0000 Subject: [PATCH 129/171] Fixing style. --- src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp index ce02ef8b9ba..1832cc2ad42 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.cpp @@ -20,7 +20,7 @@ BuildQueryPipelineSettings BuildQueryPipelineSettings::fromContext(ContextPtr fr settings.progress_callback = from->getProgressCallback(); /// Setting query_plan_merge_filters is enabled by default. - /// But it can brake short-circuit without splitting fiter step into smaller steps. + /// But it can brake short-circuit without splitting filter step into smaller steps. /// So, enable and disable this optimizations together. settings.enable_multiple_filters_transforms_for_and_chain = query_settings[Setting::query_plan_merge_filters]; return settings; From b7d80728190f1e56de3739186543afb575cf2063 Mon Sep 17 00:00:00 2001 From: Vitaly Baranov Date: Mon, 11 Nov 2024 16:06:17 +0100 Subject: [PATCH 130/171] Add waiting for prometheus instances to start before running test "test_prometheus_protocols". 
--- tests/integration/helpers/cluster.py | 25 ++++++++++++++++++- .../test_prometheus_protocols/test.py | 4 +-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index b24593602ec..a0c2e1d1a70 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -744,11 +744,13 @@ class ClickHouseCluster: # available when with_prometheus == True self.with_prometheus = False self.prometheus_writer_host = "prometheus_writer" + self.prometheus_writer_ip = None self.prometheus_writer_port = 9090 self.prometheus_writer_logs_dir = p.abspath( p.join(self.instances_dir, "prometheus_writer/logs") ) self.prometheus_reader_host = "prometheus_reader" + self.prometheus_reader_ip = None self.prometheus_reader_port = 9091 self.prometheus_reader_logs_dir = p.abspath( p.join(self.instances_dir, "prometheus_reader/logs") @@ -2728,6 +2730,16 @@ class ClickHouseCluster: raise Exception("Can't wait LDAP to start") + def wait_prometheus_to_start(self): + self.prometheus_reader_ip = self.get_instance_ip(self.prometheus_reader_host) + self.prometheus_writer_ip = self.get_instance_ip(self.prometheus_writer_host) + self.wait_for_url( + f"http://{self.prometheus_reader_ip}:{self.prometheus_reader_port}/api/v1/query?query=time()" + ) + self.wait_for_url( + f"http://{self.prometheus_writer_ip}:{self.prometheus_writer_port}/api/v1/query?query=time()" + ) + def start(self): pytest_xdist_logging_to_separate_files.setup() logging.info("Running tests in {}".format(self.base_path)) @@ -3083,12 +3095,23 @@ class ClickHouseCluster: f"http://{self.jdbc_bridge_ip}:{self.jdbc_bridge_port}/ping" ) - if self.with_prometheus: + if self.with_prometheus and self.base_prometheus_cmd: os.makedirs(self.prometheus_writer_logs_dir) os.chmod(self.prometheus_writer_logs_dir, stat.S_IRWXU | stat.S_IRWXO) os.makedirs(self.prometheus_reader_logs_dir) os.chmod(self.prometheus_reader_logs_dir, stat.S_IRWXU | stat.S_IRWXO) + prometheus_start_cmd = self.base_prometheus_cmd + common_opts + + logging.info( + "Trying to create Prometheus instances by command %s", + " ".join(map(str, prometheus_start_cmd)), + ) + run_and_check(prometheus_start_cmd) + self.up_called = True + logging.info("Trying to connect to Prometheus...") + self.wait_prometheus_to_start() + clickhouse_start_cmd = self.base_cmd + ["up", "-d", "--no-recreate"] logging.debug( ( diff --git a/tests/integration/test_prometheus_protocols/test.py b/tests/integration/test_prometheus_protocols/test.py index e368c841c4e..49bc7817f02 100644 --- a/tests/integration/test_prometheus_protocols/test.py +++ b/tests/integration/test_prometheus_protocols/test.py @@ -20,7 +20,7 @@ node = cluster.add_instance( def execute_query_on_prometheus_writer(query, timestamp): return execute_query_impl( - cluster.get_instance_ip(cluster.prometheus_writer_host), + cluster.prometheus_writer_ip, cluster.prometheus_writer_port, "/api/v1/query", query, @@ -30,7 +30,7 @@ def execute_query_on_prometheus_writer(query, timestamp): def execute_query_on_prometheus_reader(query, timestamp): return execute_query_impl( - cluster.get_instance_ip(cluster.prometheus_reader_host), + cluster.prometheus_reader_ip, cluster.prometheus_reader_port, "/api/v1/query", query, From 0bdf4402fea83e1cb96b0040323c3247f5dcb0b5 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Mon, 11 Nov 2024 15:58:38 +0100 Subject: [PATCH 131/171] Post critical errors from cherry_pick.py --- tests/ci/cherry_pick.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/ci/cherry_pick.py b/tests/ci/cherry_pick.py index 9bdc184f661..ca32d5bc24c 100644 --- a/tests/ci/cherry_pick.py +++ b/tests/ci/cherry_pick.py @@ -34,8 +34,9 @@ from typing import List, Optional import __main__ +from ci_buddy import CIBuddy from ci_config import Labels -from env_helper import TEMP_PATH +from env_helper import IS_CI, TEMP_PATH from get_robot_token import get_best_robot_token from git_helper import GIT_PREFIX, git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository @@ -653,6 +654,14 @@ def main(): bp.process_backports() if bp.error is not None: logging.error("Finished successfully, but errors occurred!") + if IS_CI: + ci_buddy = CIBuddy() + ci_buddy.post_job_error( + f"The cherry-pick finished with errors: {bp.error}", + with_instance_info=True, + with_wf_link=True, + critical=True, + ) raise bp.error From 40c4183ae70c720aaca797b165e3cf71aa4d8133 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Mon, 11 Nov 2024 17:26:28 +0100 Subject: [PATCH 132/171] fix tidy build --- src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h | 10 +++++----- src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp | 4 ++-- src/Disks/ObjectStorages/Local/LocalObjectStorage.h | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h index 317399b4753..7d6c914c398 100644 --- a/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h +++ b/src/Disks/ObjectStorages/HDFS/HDFSObjectStorage.h @@ -77,11 +77,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - /// Remove file. Throws exception if file doesn't exists or it's a directory. - void removeObject(const StoredObject & object); - - void removeObjects(const StoredObjects & objects); - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; @@ -117,6 +112,11 @@ private: void initializeHDFSFS() const; std::string extractObjectKeyFromURL(const StoredObject & object) const; + /// Remove file. Throws exception if file doesn't exists or it's a directory. + void removeObject(const StoredObject & object); + + void removeObjects(const StoredObjects & objects); + const Poco::Util::AbstractConfiguration & config; mutable HDFSBuilderWrapper hdfs_builder; diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp index 5f1b6aedc72..f24501dc60e 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.cpp @@ -81,7 +81,7 @@ std::unique_ptr LocalObjectStorage::writeObject( /// NO return std::make_unique(object.remote_path, buf_size); } -void LocalObjectStorage::removeObject(const StoredObject & object) +void LocalObjectStorage::removeObject(const StoredObject & object) const { /// For local object storage files are actually removed when "metadata" is removed. 
if (!exists(object)) @@ -91,7 +91,7 @@ void LocalObjectStorage::removeObject(const StoredObject & object) ErrnoException::throwFromPath(ErrorCodes::CANNOT_UNLINK, object.remote_path, "Cannot unlink file {}", object.remote_path); } -void LocalObjectStorage::removeObjects(const StoredObjects & objects) +void LocalObjectStorage::removeObjects(const StoredObjects & objects) const { for (const auto & object : objects) removeObject(object); diff --git a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h index ffc151bda04..5b3c3951364 100644 --- a/src/Disks/ObjectStorages/Local/LocalObjectStorage.h +++ b/src/Disks/ObjectStorages/Local/LocalObjectStorage.h @@ -42,10 +42,6 @@ public: size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE, const WriteSettings & write_settings = {}) override; - void removeObject(const StoredObject & object); - - void removeObjects(const StoredObjects & objects); - void removeObjectIfExists(const StoredObject & object) override; void removeObjectsIfExist(const StoredObjects & objects) override; @@ -82,6 +78,10 @@ public: ReadSettings patchSettings(const ReadSettings & read_settings) const override; private: + void removeObject(const StoredObject & object) const; + + void removeObjects(const StoredObjects & objects) const; + String key_prefix; LoggerPtr log; std::string description; From 6f00b490679f9e26159105f095660f6b23ea34c2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 16:41:23 +0000 Subject: [PATCH 133/171] Fixing more tests. --- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 2 +- ...filter_push_down_equivalent_sets.reference | 68 +++++++++++-------- .../0_stateless/03199_merge_filters_bug.sql | 34 +++++++++- 3 files changed, 74 insertions(+), 30 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 73fe2600946..2af9974c870 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -358,7 +358,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Don't remove if it's in the list of original outputs .remove_filter_column = step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, - .need_filter = false, + .need_filter = true, .perform_alter_conversions = true, }; diff --git a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference index 80f4e309505..d0a3e7b02ae 100644 --- a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference +++ b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference @@ -163,17 +163,21 @@ Positions: 4 2 0 1 Filter (( + (JOIN actions + Change column names to column identifiers))) Header: __table1.id UInt64 __table1.value String - Filter column: and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) (removed) + AND column: equals(__table1.id, 5_UInt8) Actions: INPUT : 0 -> id UInt64 : 0 - INPUT : 1 -> value String : 1 + COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 1 + FUNCTION equals(id : 0, 5_UInt8 :: 1) -> equals(__table1.id, 5_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) (removed) + Actions: INPUT : 2 -> value String : 0 + INPUT : 1 -> id UInt64 : 1 COLUMN Const(UInt8) 
-> 6_UInt8 UInt8 : 2 - COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 3 - ALIAS id : 0 -> __table1.id UInt64 : 4 - ALIAS value :: 1 -> __table1.value String : 5 - FUNCTION equals(id : 0, 6_UInt8 :: 2) -> equals(__table1.id, 6_UInt8) UInt8 : 1 - FUNCTION equals(id :: 0, 5_UInt8 :: 3) -> equals(__table1.id, 5_UInt8) UInt8 : 2 - FUNCTION and(equals(__table1.id, 5_UInt8) :: 2, equals(__table1.id, 6_UInt8) :: 1) -> and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) UInt8 : 3 - Positions: 3 4 5 + INPUT : 0 -> equals(__table1.id, 5_UInt8) UInt8 : 3 + ALIAS value :: 0 -> __table1.value String : 4 + ALIAS id : 1 -> __table1.id UInt64 : 0 + FUNCTION equals(id :: 1, 6_UInt8 :: 2) -> equals(__table1.id, 6_UInt8) UInt8 : 5 + FUNCTION and(equals(__table1.id, 5_UInt8) :: 3, equals(__table1.id, 6_UInt8) :: 5) -> and(equals(__table1.id, 5_UInt8), equals(__table1.id, 6_UInt8)) UInt8 : 2 + Positions: 2 0 4 ReadFromMergeTree (default.test_table_1) Header: id UInt64 value String @@ -183,17 +187,21 @@ Positions: 4 2 0 1 Filter (( + (JOIN actions + Change column names to column identifiers))) Header: __table2.id UInt64 __table2.value String - Filter column: and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) (removed) + AND column: equals(__table2.id, 6_UInt8) Actions: INPUT : 0 -> id UInt64 : 0 - INPUT : 1 -> value String : 1 + COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 1 + FUNCTION equals(id : 0, 6_UInt8 :: 1) -> equals(__table2.id, 6_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) (removed) + Actions: INPUT : 2 -> value String : 0 + INPUT : 1 -> id UInt64 : 1 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 2 - COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 3 - ALIAS id : 0 -> __table2.id UInt64 : 4 - ALIAS value :: 1 -> __table2.value String : 5 - FUNCTION equals(id : 0, 5_UInt8 :: 2) -> equals(__table2.id, 5_UInt8) UInt8 : 1 - FUNCTION equals(id :: 0, 6_UInt8 :: 3) -> equals(__table2.id, 6_UInt8) UInt8 : 2 - FUNCTION and(equals(__table2.id, 6_UInt8) :: 2, equals(__table2.id, 5_UInt8) :: 1) -> and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) UInt8 : 3 - Positions: 3 4 5 + INPUT : 0 -> equals(__table2.id, 6_UInt8) UInt8 : 3 + ALIAS value :: 0 -> __table2.value String : 4 + ALIAS id : 1 -> __table2.id UInt64 : 0 + FUNCTION equals(id :: 1, 5_UInt8 :: 2) -> equals(__table2.id, 5_UInt8) UInt8 : 5 + FUNCTION and(equals(__table2.id, 6_UInt8) :: 3, equals(__table2.id, 5_UInt8) :: 5) -> and(equals(__table2.id, 6_UInt8), equals(__table2.id, 5_UInt8)) UInt8 : 2 + Positions: 2 0 4 ReadFromMergeTree (default.test_table_2) Header: id UInt64 value String @@ -656,17 +664,21 @@ Positions: 4 2 0 1 __table1.value String __table2.value String __table2.id UInt64 - Filter column: and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) (removed) + AND column: equals(__table1.id, 5_UInt8) Actions: INPUT : 0 -> __table1.id UInt64 : 0 - INPUT :: 1 -> __table1.value String : 1 - INPUT :: 2 -> __table2.value String : 2 - INPUT : 3 -> __table2.id UInt64 : 3 - COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 - COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 5 - FUNCTION equals(__table1.id : 0, 5_UInt8 :: 4) -> equals(__table1.id, 5_UInt8) UInt8 : 6 - FUNCTION equals(__table2.id : 3, 6_UInt8 :: 5) -> equals(__table2.id, 6_UInt8) UInt8 : 4 - FUNCTION and(equals(__table1.id, 5_UInt8) :: 6, equals(__table2.id, 6_UInt8) :: 4) -> and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) UInt8 : 5 - Positions: 5 0 1 2 3 + COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 1 + 
FUNCTION equals(__table1.id : 0, 5_UInt8 :: 1) -> equals(__table1.id, 5_UInt8) UInt8 : 2 + Positions: 2 0 2 + Filter column: and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) (removed) + Actions: INPUT :: 1 -> __table1.id UInt64 : 0 + INPUT :: 2 -> __table1.value String : 1 + INPUT :: 3 -> __table2.value String : 2 + INPUT : 4 -> __table2.id UInt64 : 3 + COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 4 + INPUT : 0 -> equals(__table1.id, 5_UInt8) UInt8 : 5 + FUNCTION equals(__table2.id : 3, 6_UInt8 :: 4) -> equals(__table2.id, 6_UInt8) UInt8 : 6 + FUNCTION and(equals(__table1.id, 5_UInt8) :: 5, equals(__table2.id, 6_UInt8) :: 6) -> and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) UInt8 : 4 + Positions: 4 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String diff --git a/tests/queries/0_stateless/03199_merge_filters_bug.sql b/tests/queries/0_stateless/03199_merge_filters_bug.sql index bb2a4255a3d..2023e0f1d73 100644 --- a/tests/queries/0_stateless/03199_merge_filters_bug.sql +++ b/tests/queries/0_stateless/03199_merge_filters_bug.sql @@ -51,6 +51,22 @@ tmp1 AS LEFT JOIN tmp1 USING (fs1) WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; + optimize table t1 final; WITH @@ -67,4 +83,20 @@ tmp1 AS fs1 FROM t2 LEFT JOIN tmp1 USING (fs1) - WHERE (fs1 IN ('test')); + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; From a0cc03b175b035e9c52e782811d990a619acc272 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 17:50:11 +0000 Subject: [PATCH 134/171] Cleanup. --- src/Processors/QueryPlan/FilterStep.cpp | 17 ++++++++ .../QueryPlan/ReadFromMergeTree.cpp | 2 + .../MergeTree/MergeTreeBlockReadUtils.cpp | 2 +- src/Storages/MergeTree/MergeTreeIOSettings.h | 2 + .../MergeTree/MergeTreeSelectProcessor.cpp | 8 ++-- .../MergeTree/MergeTreeSelectProcessor.h | 3 +- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 41 +++++++++++-------- 7 files changed, 51 insertions(+), 24 deletions(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index a6b157cdd1d..5bf55f67208 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -64,6 +64,7 @@ static ActionsAndName splitSingleAndFilter(ActionsDAG & dag, const ActionsDAG::N return ActionsAndName{std::move(split_result.first), std::move(name)}; } +/// Try to split the left most AND atom to a separate DAG. static std::optional trySplitSingleAndFilter(ActionsDAG & dag, const std::string & filter_name) { const auto * filter = &dag.findInOutputs(filter_name); @@ -83,6 +84,7 @@ static std::optional trySplitSingleAndFilter(ActionsDAG & dag, c if (node->type == ActionsDAG::ActionType::FUNCTION && node->function_base->getName() == "and") { + /// The order is important. We should take the left-most atom, so put conditions on stack in reverse order. 
for (const auto * child : node->children | std::ranges::views::reverse) nodes.push(child); @@ -141,6 +143,8 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ { std::vector and_atoms; + /// Splitting AND filter condition into steps under the setting, which is enabled with the merge_filters optimization. + /// This is needed to support short-circuit properly. if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); @@ -206,6 +210,19 @@ void FilterStep::describeActions(FormatSettings & settings) const void FilterStep::describeActions(JSONBuilder::JSONMap & map) const { + auto cloned_dag = actions_dag.clone(); + + std::vector and_atoms; + if (!actions_dag.hasStatefulFunctions()) + and_atoms = splitAndChainIntoMultipleFilters(cloned_dag, filter_column_name); + + for (auto & and_atom : and_atoms) + { + auto expression = std::make_shared(std::move(and_atom.dag)); + map.add("AND column", and_atom.name); + map.add("Expression", expression->toTree()); + } + map.add("Filter Column", filter_column_name); map.add("Removes Filter", remove_filter_column); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 3186df6a6b3..d144187821a 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -175,6 +175,7 @@ namespace Setting extern const SettingsBool use_skip_indexes; extern const SettingsBool use_skip_indexes_if_final; extern const SettingsBool use_uncompressed_cache; + extern const SettingsBool query_plan_merge_filters; extern const SettingsUInt64 merge_tree_min_read_task_size; } @@ -206,6 +207,7 @@ static MergeTreeReaderSettings getMergeTreeReaderSettings( .use_asynchronous_read_from_pool = settings[Setting::allow_asynchronous_read_from_io_pool_for_merge_tree] && (settings[Setting::max_streams_to_max_threads_ratio] > 1 || settings[Setting::max_streams_for_merge_tree_reading] > 1), .enable_multiple_prewhere_read_steps = settings[Setting::enable_multiple_prewhere_read_steps], + .force_shirt_circuit_execution = settings[Setting::query_plan_merge_filters] }; } diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp index 7ba358d2d35..03a0aed80bf 100644 --- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp +++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp @@ -330,7 +330,7 @@ MergeTreeReadTaskColumns getReadTaskColumns( auto prewhere_actions = MergeTreeSelectProcessor::getPrewhereActions( prewhere_info, actions_settings, - reader_settings.enable_multiple_prewhere_read_steps); + reader_settings.enable_multiple_prewhere_read_steps, reader_settings.force_shirt_circuit_execution); for (const auto & step : prewhere_actions.steps) add_step(*step); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index 4d1d2533729..ecd4ad34961 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -45,6 +45,8 @@ struct MergeTreeReaderSettings bool use_asynchronous_read_from_pool = false; /// If PREWHERE has multiple conditions combined with AND, execute them in separate read/filtering steps. bool enable_multiple_prewhere_read_steps = false; + /// In case of multiple prewhere steps, execute filtering earlier to support short-circuit properly.
+ bool force_shirt_circuit_execution = false; /// If true, try to lower size of read buffer according to granule size and compressed block size. bool adjust_read_buffer_size = true; /// If true, it's allowed to read the whole part without reading marks. diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 5efd33ce09a..8beff55e698 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -91,7 +91,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( , algorithm(std::move(algorithm_)) , prewhere_info(prewhere_info_) , actions_settings(actions_settings_) - , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps)) + , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps, reader_settings_.force_shirt_circuit_execution)) , reader_settings(reader_settings_) , result_header(transformHeader(pool->getHeader(), prewhere_info)) { @@ -124,9 +124,9 @@ String MergeTreeSelectProcessor::getName() const return fmt::format("MergeTreeSelect(pool: {}, algorithm: {})", pool->getName(), algorithm->getName()); } -bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere); +bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere, bool force_shirt_circuit_execution); -PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps) +PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps, bool force_shirt_circuit_execution) { PrewhereExprInfo prewhere_actions; if (prewhere_info) @@ -147,7 +147,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr } if (!enable_multiple_prewhere_read_steps || - !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions)) + !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions, force_shirt_circuit_execution)) { PrewhereExprStep prewhere_step { diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index 33069a78e33..afd88116e15 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -73,7 +73,8 @@ public: static PrewhereExprInfo getPrewhereActions( PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, - bool enable_multiple_prewhere_read_steps); + bool enable_multiple_prewhere_read_steps, + bool force_shirt_circuit_execution); void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 2af9974c870..c35e356bf18 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -50,6 +50,17 @@ void fillRequiredColumns(const ActionsDAG::Node * node, std::unordered_map DAGNodeRef +/// ResultNode -> DAGNodeRef using OriginalToNewNodeMap = 
std::unordered_map; using NodeNameToLastUsedStepMap = std::unordered_map; @@ -70,7 +81,6 @@ const ActionsDAG::Node & addClonedDAGToDAG( OriginalToNewNodeMap & node_remap, NodeNameToLastUsedStepMap & node_to_step_map) { - //const String & node_name = original_dag_node->result_name; /// Look for the node in the map of already known nodes if (node_remap.contains(original_dag_node)) { @@ -82,9 +92,11 @@ const ActionsDAG::Node & addClonedDAGToDAG( /// If the node is known from the previous steps, add it as an input, except for constants if (original_dag_node->type != ActionsDAG::ActionType::COLUMN) { - node_ref.dag->addOrReplaceInOutputs(*node_ref.node); + // addToOutputsIfNotAlreadyAdded(*node_ref.dag, node_ref.node); + node_ref.dag->getOutputs().push_back(node_ref.node); + const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); - node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// TODO: here we update the node reference. Is it always correct? + node_remap[original_dag_node] = {new_dag.get(), &new_node}; /// Remember the index of the last step which reuses this node. /// We cannot remove this node from the outputs before that step. @@ -207,7 +219,11 @@ const ActionsDAG::Node & addAndTrue( /// 6. Find all outputs of the original DAG /// 7. Find all outputs that were computed in the already built DAGs, mark these nodes as outputs in the steps where they were computed /// 8. Add computation of the remaining outputs to the last step with the procedure similar to 4 -bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere) +bool tryBuildPrewhereSteps( + PrewhereInfoPtr prewhere_info, + const ExpressionActionsSettings & actions_settings, + PrewhereExprInfo & prewhere, + bool force_shirt_circuit_execution) { if (!prewhere_info) return true; @@ -275,26 +291,16 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Add AND function to combine the conditions FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); //, node_remap); - //step_dag->addOrReplaceInOutputs(and_function_node); result_node = &and_function_node; } else { result_node = new_condition_nodes.front(); /// Check if explicit cast is needed for the condition to serve as a filter. - //const auto result_type_name = result_node->result_type->getName(); - if (isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) - { - /// No need to cast - //step_dag->addOrReplaceInOutputs(result_node); - //result_name = result_node.result_name; - } - else + if (!isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) { /// Build "condition AND True" expression to "cast" the condition to UInt8 or Nullable(UInt8) depending on its type. 
result_node = &addAndTrue(step_dag, *result_node); //, node_remap); - //step_dag->addOrReplaceInOutputs(cast_node); - //result_name = &cast_node.result_name; } } @@ -334,7 +340,6 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); //, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); - //last_step_dag->addOrReplaceInOutputs(prewhere_result_node); last_step_dag->getOutputs().push_back(&prewhere_result_node); steps.back().result_node = &prewhere_result_node; } @@ -358,7 +363,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Don't remove if it's in the list of original outputs .remove_filter_column = step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, - .need_filter = true, + .need_filter = force_shirt_circuit_execution, .perform_alter_conversions = true, }; From 92114f3c749bb78811ece644123b3d81e011e56f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Mon, 11 Nov 2024 18:01:24 +0000 Subject: [PATCH 135/171] Fixing typos. --- src/Processors/QueryPlan/FilterStep.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/QueryPlan/FilterStep.cpp b/src/Processors/QueryPlan/FilterStep.cpp index 5bf55f67208..af9e3f0c515 100644 --- a/src/Processors/QueryPlan/FilterStep.cpp +++ b/src/Processors/QueryPlan/FilterStep.cpp @@ -143,7 +143,7 @@ void FilterStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQ { std::vector and_atoms; - /// Spliting AND filter condition to steps under the setting, which is enabled with merge_filters optimization. + /// Splitting AND filter condition to steps under the setting, which is enabled with merge_filters optimization. /// This is needed to support short-circuit properly. if (settings.enable_multiple_filters_transforms_for_and_chain && !actions_dag.hasStatefulFunctions()) and_atoms = splitAndChainIntoMultipleFilters(actions_dag, filter_column_name); From 621cb60446cb17f0366f49b86c3432eed5db3716 Mon Sep 17 00:00:00 2001 From: Peter Nguyen Date: Mon, 11 Nov 2024 11:12:01 -0800 Subject: [PATCH 136/171] Fix 'was was' typo in sql-reference/statements/alter/column.md --- docs/en/sql-reference/statements/alter/column.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/alter/column.md b/docs/en/sql-reference/statements/alter/column.md index 29df041ccc6..fb16dacb7c8 100644 --- a/docs/en/sql-reference/statements/alter/column.md +++ b/docs/en/sql-reference/statements/alter/column.md @@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version: - In ClickHouse < v24.2, all existing rows are rewritten. -- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression. 
+- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression. Syntax: From b05d3ed6df35b2e66c81bc8d7b9077a82758dcf1 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Nov 2024 22:43:03 +0100 Subject: [PATCH 137/171] impl --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 3 + .../ExecuteScalarSubqueriesVisitor.cpp | 7 ++- src/Interpreters/PreparedSets.cpp | 19 +++--- src/Interpreters/ProcessorsProfileLog.cpp | 62 ++++++++++++++++++- src/Interpreters/ProcessorsProfileLog.h | 1 + src/Interpreters/executeQuery.cpp | 48 +------------- 6 files changed, 80 insertions(+), 60 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 390418494e7..03ebd893c47 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -1,3 +1,4 @@ +#include #include #include @@ -676,6 +677,8 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden "tuple"}); } } + + logProcessorProfile(context, io.pipeline.getProcessors()); } scalars_cache.emplace(node_with_hash, scalar_block); diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index d4da038c089..9ae2ffc208d 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -5,9 +5,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -19,9 +21,8 @@ #include #include #include -#include #include -#include +#include namespace ProfileEvents { @@ -246,6 +247,8 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr if (tmp_block.rows() != 0) throw Exception(ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY, "Scalar subquery returned more than one row"); + + logProcessorProfile(data.getContext(), io.pipeline.getProcessors()); } block = materializeBlock(block); diff --git a/src/Interpreters/PreparedSets.cpp b/src/Interpreters/PreparedSets.cpp index 538108165fb..c69e2f84d42 100644 --- a/src/Interpreters/PreparedSets.cpp +++ b/src/Interpreters/PreparedSets.cpp @@ -1,21 +1,22 @@ #include #include -#include -#include -#include -#include -#include +#include #include -#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include #include -#include -#include #include #include +#include namespace DB { @@ -239,6 +240,8 @@ SetPtr FutureSetFromSubquery::buildOrderedSetInplace(const ContextPtr & context) if (!set_and_key->set->isCreated()) return nullptr; + logProcessorProfile(context, pipeline.getProcessors()); + return set_and_key->set; } diff --git a/src/Interpreters/ProcessorsProfileLog.cpp b/src/Interpreters/ProcessorsProfileLog.cpp index 8a646b5d0e7..d7811e5e9e2 100644 --- a/src/Interpreters/ProcessorsProfileLog.cpp +++ b/src/Interpreters/ProcessorsProfileLog.cpp @@ -1,5 +1,6 @@ #include +#include #include #include #include @@ -8,16 +9,19 @@ #include #include #include +#include #include #include #include -#include - -#include namespace DB { +namespace Setting +{ +extern const SettingsBool log_processors_profiles; +} + ColumnsDescription ProcessorProfileLogElement::getColumnsDescription() { 
return ColumnsDescription @@ -81,5 +85,57 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(output_bytes); } +void logProcessorProfile(ContextPtr context, const Processors & processors) +{ + const Settings & settings = context->getSettingsRef(); + if (settings[Setting::log_processors_profiles]) + { + if (auto processors_profile_log = context->getProcessorsProfileLog()) + { + ProcessorProfileLogElement processor_elem; + const auto time_now = std::chrono::system_clock::now(); + processor_elem.event_time = timeInSeconds(time_now); + processor_elem.event_time_microseconds = timeInMicroseconds(time_now); + processor_elem.initial_query_id = context->getInitialQueryId(); + processor_elem.query_id = context->getCurrentQueryId(); + + auto get_proc_id = [](const IProcessor & proc) -> UInt64 { return reinterpret_cast(&proc); }; + + for (const auto & processor : processors) + { + std::vector parents; + for (const auto & port : processor->getOutputs()) + { + if (!port.isConnected()) + continue; + const IProcessor & next = port.getInputPort().getProcessor(); + parents.push_back(get_proc_id(next)); + } + + processor_elem.id = get_proc_id(*processor); + processor_elem.parent_ids = std::move(parents); + + processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); + processor_elem.plan_step_name = processor->getPlanStepName(); + processor_elem.plan_step_description = processor->getPlanStepDescription(); + processor_elem.plan_group = processor->getQueryPlanStepGroup(); + + processor_elem.processor_name = processor->getName(); + + processor_elem.elapsed_us = static_cast(processor->getElapsedNs() / 1000U); + processor_elem.input_wait_elapsed_us = static_cast(processor->getInputWaitElapsedNs() / 1000U); + processor_elem.output_wait_elapsed_us = static_cast(processor->getOutputWaitElapsedNs() / 1000U); + + auto stats = processor->getProcessorDataStats(); + processor_elem.input_rows = stats.input_rows; + processor_elem.input_bytes = stats.input_bytes; + processor_elem.output_rows = stats.output_rows; + processor_elem.output_bytes = stats.output_bytes; + + processors_profile_log->add(processor_elem); + } + } + } +} } diff --git a/src/Interpreters/ProcessorsProfileLog.h b/src/Interpreters/ProcessorsProfileLog.h index fbf52f45f56..9cc2ab6c7f0 100644 --- a/src/Interpreters/ProcessorsProfileLog.h +++ b/src/Interpreters/ProcessorsProfileLog.h @@ -50,4 +50,5 @@ public: using SystemLog::SystemLog; }; +void logProcessorProfile(ContextPtr context, const Processors & processors); } diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 9250c069283..fa28fa04ab1 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -117,7 +117,6 @@ namespace Setting extern const SettingsOverflowMode join_overflow_mode; extern const SettingsString log_comment; extern const SettingsBool log_formatted_queries; - extern const SettingsBool log_processors_profiles; extern const SettingsBool log_profile_events; extern const SettingsUInt64 log_queries_cut_to_length; extern const SettingsBool log_queries; @@ -551,53 +550,8 @@ void logQueryFinish( if (auto query_log = context->getQueryLog()) query_log->add(elem); } - if (settings[Setting::log_processors_profiles]) - { - if (auto processors_profile_log = context->getProcessorsProfileLog()) - { - ProcessorProfileLogElement processor_elem; - processor_elem.event_time = elem.event_time; - processor_elem.event_time_microseconds = elem.event_time_microseconds; - 
processor_elem.initial_query_id = elem.client_info.initial_query_id; - processor_elem.query_id = elem.client_info.current_query_id; - auto get_proc_id = [](const IProcessor & proc) -> UInt64 { return reinterpret_cast(&proc); }; - - for (const auto & processor : query_pipeline.getProcessors()) - { - std::vector parents; - for (const auto & port : processor->getOutputs()) - { - if (!port.isConnected()) - continue; - const IProcessor & next = port.getInputPort().getProcessor(); - parents.push_back(get_proc_id(next)); - } - - processor_elem.id = get_proc_id(*processor); - processor_elem.parent_ids = std::move(parents); - - processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); - processor_elem.plan_step_name = processor->getPlanStepName(); - processor_elem.plan_step_description = processor->getPlanStepDescription(); - processor_elem.plan_group = processor->getQueryPlanStepGroup(); - - processor_elem.processor_name = processor->getName(); - - processor_elem.elapsed_us = static_cast(processor->getElapsedNs() / 1000U); - processor_elem.input_wait_elapsed_us = static_cast(processor->getInputWaitElapsedNs() / 1000U); - processor_elem.output_wait_elapsed_us = static_cast(processor->getOutputWaitElapsedNs() / 1000U); - - auto stats = processor->getProcessorDataStats(); - processor_elem.input_rows = stats.input_rows; - processor_elem.input_bytes = stats.input_bytes; - processor_elem.output_rows = stats.output_rows; - processor_elem.output_bytes = stats.output_bytes; - - processors_profile_log->add(processor_elem); - } - } - } + logProcessorProfile(context, query_pipeline.getProcessors()); logQueryMetricLogFinish(context, internal, elem.client_info.current_query_id, time_now, std::make_shared(info)); } From bd71442ea26a5263b56e6774c6938fcb24dea432 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 11 Nov 2024 22:45:39 +0100 Subject: [PATCH 138/171] add test --- .../03270_processors_profile_log_3.reference | 2 + .../03270_processors_profile_log_3.sh | 96 +++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 tests/queries/0_stateless/03270_processors_profile_log_3.reference create mode 100755 tests/queries/0_stateless/03270_processors_profile_log_3.sh diff --git a/tests/queries/0_stateless/03270_processors_profile_log_3.reference b/tests/queries/0_stateless/03270_processors_profile_log_3.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03270_processors_profile_log_3.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03270_processors_profile_log_3.sh b/tests/queries/0_stateless/03270_processors_profile_log_3.sh new file mode 100755 index 00000000000..eb86a9f6352 --- /dev/null +++ b/tests/queries/0_stateless/03270_processors_profile_log_3.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +set -e + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -q " + CREATE TABLE t + ( + a UInt32, + b UInt32 + ) + ENGINE = MergeTree + ORDER BY (a, b); + + INSERT INTO t SELECT number, number FROM numbers(1000); +" + +query_id="03270_processors_profile_log_3_$RANDOM" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SET log_processors_profiles = 1; + + WITH + t0 AS + ( + SELECT * + FROM numbers(1000) + ), + t1 AS + ( + SELECT number * 3 AS b + FROM t0 + ) + SELECT b * 3 + FROM t + WHERE a IN (t1) + FORMAT Null; +" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SYSTEM FLUSH LOGS; + + SELECT sum(elapsed_us) > 0 + FROM system.processors_profile_log + WHERE event_date >= yesterday() AND query_id = '$query_id' AND name = 'CreatingSetsTransform'; +" + +##################################################################### + +$CLICKHOUSE_CLIENT -q " + CREATE TABLE t1 + ( + st FixedString(54) + ) + ENGINE = MergeTree + ORDER BY tuple(); + + INSERT INTO t1 VALUES + ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTUVWXYZ'), + ('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'), + ('IIIIIIIIII\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'); +" + +query_id="03270_processors_profile_log_3_$RANDOM" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SET log_processors_profiles = 1; + SET max_threads=2; -- no merging when max_threads=1 + + WITH + ( + SELECT groupConcat(',')(st) + FROM t1 + ORDER BY ALL + ) AS a, + ( + SELECT groupConcat(',')(CAST(st, 'String')) + FROM t1 + ORDER BY ALL + ) AS b + SELECT a = b + FORMAT Null; +" + +$CLICKHOUSE_CLIENT --query_id="$query_id" -q " + SYSTEM FLUSH LOGS; + + SELECT sum(elapsed_us) > 0 + FROM system.processors_profile_log + WHERE event_date >= yesterday() AND query_id = '$query_id' AND name = 'MergingSortedTransform'; +" + From 0825b42d5f82879a40c428ee9b1735a7497ca669 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Tue, 12 Nov 2024 07:09:58 +0100 Subject: [PATCH 139/171] Lint --- src/Analyzer/QueryTreePassManager.cpp | 59 +++++++++++----------- src/Storages/IStorageCluster.h | 4 +- src/TableFunctions/ITableFunctionCluster.h | 2 +- 3 files changed, 32 insertions(+), 33 deletions(-) diff --git a/src/Analyzer/QueryTreePassManager.cpp b/src/Analyzer/QueryTreePassManager.cpp index 4443f83596f..0005aa6c8bc 100644 --- a/src/Analyzer/QueryTreePassManager.cpp +++ b/src/Analyzer/QueryTreePassManager.cpp @@ -3,7 +3,6 @@ #include #include -#include "Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h" #include #include @@ -16,39 +15,39 @@ #include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include - +#include +#include +#include +#include namespace DB { diff --git a/src/Storages/IStorageCluster.h b/src/Storages/IStorageCluster.h index 893cf222556..d000e24562f 100644 --- a/src/Storages/IStorageCluster.h +++ b/src/Storages/IStorageCluster.h @@ -10,8 +10,8 @@ namespace DB /** - * Base cluster for Storages used in table functions like s3Cluster and hdfsCluster - * 
Needed for code simplification around parallel_distributed_insert_select
+ * Base cluster for Storages used in table functions like s3Cluster and hdfsCluster.
+ * Necessary for code simplification around parallel_distributed_insert_select.
  */
 class IStorageCluster : public IStorage
 {
diff --git a/src/TableFunctions/ITableFunctionCluster.h b/src/TableFunctions/ITableFunctionCluster.h
index 744d7139d16..6935ac39e79 100644
--- a/src/TableFunctions/ITableFunctionCluster.h
+++ b/src/TableFunctions/ITableFunctionCluster.h
@@ -51,7 +51,7 @@ protected:
             "corresponding table function",
             getName());
 
-    /// Evaluate only first argument, everything else will be done Base class
+    /// Evaluate only first argument, everything else will be done by the Base class
     args[0] = evaluateConstantExpressionOrIdentifierAsLiteral(args[0], context);
 
     /// Cluster name is always the first

From e85a9852e7e6e57d4d86dadfddac1201662e987e Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev" 
Date: Mon, 11 Nov 2024 20:10:53 +0100
Subject: [PATCH 140/171] Fix issues with garbage DB events from orphaned branches

---
 tests/ci/ci.py                     |  2 ++
 tests/ci/clickhouse_helper.py      | 15 ++++++---------
 tests/ci/commit_status_helper.py   |  8 +++++++-
 tests/ci/docker_images_check.py    |  4 +++-
 tests/ci/docker_manifests_merge.py |  4 +++-
 tests/ci/pr_info.py                | 20 +++++++++++---------
 tests/ci/upload_result_helper.py   |  4 ++--
 7 files changed, 34 insertions(+), 23 deletions(-)

diff --git a/tests/ci/ci.py b/tests/ci/ci.py
index 10431ce038f..d6eeb6a3613 100644
--- a/tests/ci/ci.py
+++ b/tests/ci/ci.py
@@ -1268,6 +1268,7 @@ def main() -> int:
                 s3,
                 pr_info.number,
                 pr_info.sha,
+                pr_info.head_ref,
                 job_report.test_results,
                 job_report.additional_files,
                 job_report.check_name or _get_ext_check_name(args.job_name),
@@ -1335,6 +1336,7 @@ def main() -> int:
                 s3,
                 pr_info.number,
                 pr_info.sha,
+                pr_info.head_ref,
                 job_report.test_results,
                 job_report.additional_files,
                 job_report.check_name or _get_ext_check_name(args.job_name),
diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py
index 99f4778fe8f..32e7f6f6a53 100644
--- a/tests/ci/clickhouse_helper.py
+++ b/tests/ci/clickhouse_helper.py
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Optional
 
 import requests
 
+from env_helper import GITHUB_REPOSITORY
 from get_robot_token import get_parameter_from_ssm
 from pr_info import PRInfo
 from report import TestResults
@@ -211,17 +212,13 @@ def prepare_tests_results_for_clickhouse(
     report_url: str,
     check_name: str,
 ) -> List[dict]:
-    pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
-    base_ref = "master"
-    head_ref = "master"
-    base_repo = pr_info.repo_full_name
-    head_repo = pr_info.repo_full_name
+    base_ref = pr_info.base_ref
+    base_repo = pr_info.base_name
+    head_ref = pr_info.head_ref
+    head_repo = pr_info.head_name
+    pull_request_url = f"https://github.com/{GITHUB_REPOSITORY}/commits/{head_ref}"
     if pr_info.number != 0:
         pull_request_url = pr_info.pr_html_url
-        base_ref = pr_info.base_ref
-        base_repo = pr_info.base_name
-        head_ref = pr_info.head_ref
-        head_repo = pr_info.head_name
 
     common_properties = {
         "pull_request_number": pr_info.number,
diff --git a/tests/ci/commit_status_helper.py b/tests/ci/commit_status_helper.py
index 28323b6786c..ce9eaf78e75 100644
--- a/tests/ci/commit_status_helper.py
+++ b/tests/ci/commit_status_helper.py
@@ -315,7 +315,13 @@ def create_ci_report(pr_info: PRInfo, statuses: CommitStatuses) -> str:
         )
     )
     return upload_results(
-        S3Helper(), pr_info.number, pr_info.sha, test_results, [], CI.StatusNames.CI
+        
S3Helper(), + pr_info.number, + pr_info.sha, + pr_info.head_ref, + test_results, + [], + CI.StatusNames.CI, ) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index c8dbcd10245..c94aff1ab4d 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -250,7 +250,9 @@ def main(): s3_helper = S3Helper() pr_info = PRInfo() - url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME) + url = upload_results( + s3_helper, pr_info.number, pr_info.sha, pr_info.head_ref, test_results, [], NAME + ) print(f"::notice ::Report url: {url}") diff --git a/tests/ci/docker_manifests_merge.py b/tests/ci/docker_manifests_merge.py index 6c6a88330ea..772cbbf1b02 100644 --- a/tests/ci/docker_manifests_merge.py +++ b/tests/ci/docker_manifests_merge.py @@ -183,7 +183,9 @@ def main(): pr_info = PRInfo() s3_helper = S3Helper() - url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, [], NAME) + url = upload_results( + s3_helper, pr_info.number, pr_info.sha, pr_info.head_ref, test_results, [], NAME + ) print(f"::notice ::Report url: {url}") diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 8ed2f972183..d602a2d752a 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -132,6 +132,12 @@ class PRInfo: ref = github_event.get("ref", "refs/heads/master") if ref and ref.startswith("refs/heads/"): ref = ref[11:] + # Default values + self.base_ref = "" # type: str + self.base_name = "" # type: str + self.head_ref = "" # type: str + self.head_name = "" # type: str + self.number = 0 # type: int # workflow completed event, used for PRs only if "action" in github_event and github_event["action"] == "completed": @@ -146,7 +152,7 @@ class PRInfo: if "pull_request" in github_event: # pull request and other similar events self.event_type = EventType.PULL_REQUEST - self.number = github_event["pull_request"]["number"] # type: int + self.number = github_event["pull_request"]["number"] if pr_event_from_api: try: response = get_gh_api( @@ -172,17 +178,13 @@ class PRInfo: self.pr_html_url = f"{repo_prefix}/pull/{self.number}" # master or backport/xx.x/xxxxx - where the PR will be merged - self.base_ref = github_event["pull_request"]["base"]["ref"] # type: str + self.base_ref = github_event["pull_request"]["base"]["ref"] # ClickHouse/ClickHouse - self.base_name = github_event["pull_request"]["base"]["repo"][ - "full_name" - ] # type: str + self.base_name = github_event["pull_request"]["base"]["repo"]["full_name"] # any_branch-name - the name of working branch name - self.head_ref = github_event["pull_request"]["head"]["ref"] # type: str + self.head_ref = github_event["pull_request"]["head"]["ref"] # UserName/ClickHouse or ClickHouse/ClickHouse - self.head_name = github_event["pull_request"]["head"]["repo"][ - "full_name" - ] # type: str + self.head_name = github_event["pull_request"]["head"]["repo"]["full_name"] self.body = github_event["pull_request"]["body"] self.labels = { label["name"] for label in github_event["pull_request"]["labels"] diff --git a/tests/ci/upload_result_helper.py b/tests/ci/upload_result_helper.py index 619ffe937c7..85f4cfd12a0 100644 --- a/tests/ci/upload_result_helper.py +++ b/tests/ci/upload_result_helper.py @@ -64,6 +64,7 @@ def upload_results( s3_client: S3Helper, pr_number: int, commit_sha: str, + branch_name: str, test_results: TestResults, additional_files: Union[Sequence[Path], Sequence[str]], check_name: str, @@ -80,8 +81,7 @@ def upload_results( process_logs(s3_client, additional_files, 
s3_path_prefix, test_results) ) - branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commits/master" - branch_name = "master" + branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/commits/{branch_name}" if pr_number != 0: branch_name = f"PR #{pr_number}" branch_url = f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/pull/{pr_number}" From cba626743121f2ceba143d593dc206192bf2d9d4 Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 12 Nov 2024 10:31:28 +0100 Subject: [PATCH 141/171] Allow reading previous vaules of async metrics during update --- src/Common/AsynchronousMetrics.cpp | 7 +++++-- src/Common/AsynchronousMetrics.h | 7 ++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index f419a23b209..262f3ca3a7a 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -331,7 +331,7 @@ AsynchronousMetrics::~AsynchronousMetrics() AsynchronousMetricValues AsynchronousMetrics::getValues() const { - std::lock_guard lock(data_mutex); + SharedLockGuard lock(values_mutex); return values; } @@ -1807,7 +1807,10 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) first_run = false; // Finally, update the current metrics. - values = new_values; + { + std::lock_guard values_lock(values_mutex); + values.swap(new_values); + } } } diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 215dc6e1337..b60f0f25e74 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -100,6 +101,7 @@ private: std::condition_variable wait_cond; bool quit TSA_GUARDED_BY(thread_mutex) = false; + /// Protects all raw data and serializes multiple updates. mutable std::mutex data_mutex; /// Some values are incremental and we have to calculate the difference. @@ -107,7 +109,10 @@ private: bool first_run TSA_GUARDED_BY(data_mutex) = true; TimePoint previous_update_time TSA_GUARDED_BY(data_mutex); - AsynchronousMetricValues values TSA_GUARDED_BY(data_mutex); + /// Protects saved values. + mutable SharedMutex values_mutex; + /// Values store the result of the last update prepared for reading. + AsynchronousMetricValues values TSA_GUARDED_BY(values_mutex); #if defined(OS_LINUX) || defined(OS_FREEBSD) MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex); From 0e2cd397d2e876b9290790d242c4f283bae6aba2 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 12 Nov 2024 09:40:51 +0000 Subject: [PATCH 142/171] Cleanup. 
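This cleanup removes the leftover commented-out code from the old name-based
node remapping and tightens how a node reused from an earlier prewhere step is
exported from that step: it is appended to the step's outputs only when it is
not already the filter node, which by construction sits at outputs[0]. A
minimal standalone sketch of that guard follows; Dag, Node and
exportForNextStep are simplified stand-ins for illustration, not the real
ActionsDAG API.

#include <cassert>
#include <vector>

struct Node {};
struct Dag { std::vector<const Node *> outputs; }; // outputs[0] holds the step's filter node

// Export `node` from the step so the next step can consume it as an input.
// The filter node is already exported as outputs[0]; re-adding it would
// duplicate that output.
void exportForNextStep(Dag & dag, const Node * node)
{
    assert(!dag.outputs.empty());
    if (dag.outputs.front() != node)
        dag.outputs.push_back(node);
}

int main()
{
    Node filter;
    Node reused;
    Dag dag{{&filter}};
    exportForNextStep(dag, &reused); // appended after the filter
    exportForNextStep(dag, &filter); // no-op: it is already outputs[0]
    assert(dag.outputs.size() == 2);
    return 0;
}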
--- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 12 ------------ .../queries/0_stateless/03199_merge_filters_bug.sql | 2 ++ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index c35e356bf18..dd5db3e2b02 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -50,17 +50,6 @@ void fillRequiredColumns(const ActionsDAG::Node * node, std::unordered_maptype != ActionsDAG::ActionType::COLUMN) { - // addToOutputsIfNotAlreadyAdded(*node_ref.dag, node_ref.node); node_ref.dag->getOutputs().push_back(node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); diff --git a/tests/queries/0_stateless/03199_merge_filters_bug.sql b/tests/queries/0_stateless/03199_merge_filters_bug.sql index 2023e0f1d73..696856c9121 100644 --- a/tests/queries/0_stateless/03199_merge_filters_bug.sql +++ b/tests/queries/0_stateless/03199_merge_filters_bug.sql @@ -1,3 +1,5 @@ +set allow_reorder_prewhere_conditions=0; + drop table if exists t1; drop table if exists t2; From 0223e6c9717fc020544bb6015469a6764bb20e8c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 12 Nov 2024 09:47:07 +0000 Subject: [PATCH 143/171] Cleanup. --- .../MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index dd5db3e2b02..9110954fad6 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -81,7 +81,10 @@ const ActionsDAG::Node & addClonedDAGToDAG( /// If the node is known from the previous steps, add it as an input, except for constants if (original_dag_node->type != ActionsDAG::ActionType::COLUMN) { - node_ref.dag->getOutputs().push_back(node_ref.node); + /// If the node was found in node_remap, it was not added to outputs yet. + /// The only exception is the filter node, which is always the first one. 
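+        /// (Re-adding the filter node here would export it from the step twice.)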
+ if (node_ref.dag->getOutputs().at(0) != node_ref.node) + node_ref.dag->getOutputs().push_back(node_ref.node); const auto & new_node = new_dag->addInput(node_ref.node->result_name, node_ref.node->result_type); node_remap[original_dag_node] = {new_dag.get(), &new_node}; @@ -140,10 +143,8 @@ const ActionsDAG::Node & addFunction( const ActionsDAGPtr & new_dag, const FunctionOverloadResolverPtr & function, ActionsDAG::NodeRawConstPtrs children) - //OriginalToNewNodeMap & node_remap) { const auto & new_node = new_dag->addFunction(function, children, ""); - //node_remap[new_node.result_name] = {new_dag.get(), &new_node}; return new_node; } @@ -154,13 +155,11 @@ const ActionsDAG::Node & addCast( const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, const DataTypePtr & to_type) - //[[maybe_unused]] OriginalToNewNodeMap & node_remap) { if (!node_to_cast.result_type->equals(*to_type)) return node_to_cast; const auto & new_node = dag->addCast(node_to_cast, to_type, {}); - //node_remap[new_node.result_name] = {dag.get(), &new_node}; return new_node; } @@ -265,7 +264,6 @@ bool tryBuildPrewhereSteps( ActionsDAGPtr step_dag = std::make_unique(); const ActionsDAG::Node * original_node = nullptr; const ActionsDAG::Node * result_node; - //String result_name; std::vector new_condition_nodes; for (const auto * node : condition_group) From 4e0e0f0d88f4b017878167dfde5ad2c33c33709b Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 12 Nov 2024 09:50:12 +0000 Subject: [PATCH 144/171] Cleanup. --- .../MergeTreeSplitPrewhereIntoReadSteps.cpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 9110954fad6..60b6eca1720 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -170,7 +170,6 @@ const ActionsDAG::Node & addCast( const ActionsDAG::Node & addAndTrue( const ActionsDAGPtr & dag, const ActionsDAG::Node & filter_node_to_normalize) - //OriginalToNewNodeMap & node_remap) { Field const_true_value(true); @@ -181,7 +180,7 @@ const ActionsDAG::Node & addAndTrue( const auto * const_true_node = &dag->addColumn(std::move(const_true_column)); ActionsDAG::NodeRawConstPtrs children = {&filter_node_to_normalize, const_true_node}; FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - return addFunction(dag, func_builder_and, children); //, node_remap); + return addFunction(dag, func_builder_and, children); } } @@ -251,7 +250,6 @@ bool tryBuildPrewhereSteps( const ActionsDAG::Node * original_node; /// Result condition node const ActionsDAG::Node * result_node; - //String column_name; }; std::vector steps; @@ -276,7 +274,7 @@ bool tryBuildPrewhereSteps( { /// Add AND function to combine the conditions FunctionOverloadResolverPtr func_builder_and = std::make_unique(std::make_shared()); - const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); //, node_remap); + const auto & and_function_node = addFunction(step_dag, func_builder_and, new_condition_nodes); result_node = &and_function_node; } else @@ -286,7 +284,7 @@ bool tryBuildPrewhereSteps( if (!isUInt8(removeNullable(removeLowCardinality(result_node->result_type)))) { /// Build "condition AND True" expression to "cast" the condition to UInt8 or Nullable(UInt8) depending on its type. 
- result_node = &addAndTrue(step_dag, *result_node); //, node_remap); + result_node = &addAndTrue(step_dag, *result_node); } } @@ -303,7 +301,7 @@ bool tryBuildPrewhereSteps( for (const auto * output : original_outputs) { all_outputs.insert(output); - if (node_remap.contains(output)) //->result_name)) + if (node_remap.contains(output)) { const auto & new_node_info = node_remap[output]; new_node_info.dag->getOutputs().push_back(new_node_info.node); @@ -321,9 +319,9 @@ bool tryBuildPrewhereSteps( auto & last_step_dag = steps.back().actions; auto & last_step_result_node = steps.back().result_node; /// Build AND(last_step_result_node, true) - const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node); //, node_remap); + const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node); /// Build CAST(and_node, type of PREWHERE column) - const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); //, node_remap); + const auto & cast_node = addCast(last_step_dag, and_node, output->result_type); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); last_step_dag->getOutputs().push_back(&prewhere_result_node); From 05be65315a60b904bcf95c77855f6e23b65cbd13 Mon Sep 17 00:00:00 2001 From: maxvostrikov Date: Tue, 12 Nov 2024 11:54:12 +0100 Subject: [PATCH 145/171] squash! Missing tests in several tests in 24.10 Added corner cases for tests for: to_utc_timestamp and from_utc_timestamp (more timezones, spetial timezones, epoch corners does not look right, raising a bug over that) arrayUnion (empty and big arrays) quantilesExactWeightedInterpolated (more data types) --- docs/en/sql-reference/functions/date-time-functions.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/sql-reference/functions/date-time-functions.md b/docs/en/sql-reference/functions/date-time-functions.md index 2357b5b2fdd..34dc6e996ee 100644 --- a/docs/en/sql-reference/functions/date-time-functions.md +++ b/docs/en/sql-reference/functions/date-time-functions.md @@ -4773,7 +4773,7 @@ Result: ## toUTCTimestamp -Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp +Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks. **Syntax** @@ -4799,14 +4799,14 @@ SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai'); Result: ``` text -┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐ +┌─toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai')┐ │ 2023-03-15 16:00:00 │ └─────────────────────────────────────────────────────────┘ ``` ## fromUTCTimestamp -Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp +Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks. 
**Syntax** @@ -4832,7 +4832,7 @@ SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai') Result: ``` text -┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐ +┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3), 'Asia/Shanghai')─┐ │ 2023-03-16 18:00:00.000 │ └─────────────────────────────────────────────────────────────────────────┘ ``` From d4e288a8c1b001a73a5d7e545ae8bd40ed2192df Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Tue, 12 Nov 2024 11:02:46 +0000 Subject: [PATCH 146/171] Refactor to avoid too much TSA_GUARD clutter Remove LOGICAL_ERROR check that should never happen. It didn't happen on the CI so far, and seems unnecessary. --- src/Interpreters/QueryMetricLog.cpp | 41 ++++++++++++----------------- src/Interpreters/QueryMetricLog.h | 15 +++++++---- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index 62700f49605..bacaf00dc2d 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -150,12 +150,13 @@ void QueryMetricLog::collectMetric(const ProcessList & process_list, String quer void QueryMetricLog::startQuery(const String & query_id, TimePoint start_time, UInt64 interval_milliseconds) TSA_NO_THREAD_SAFETY_ANALYSIS { QueryMetricLogStatus query_status; - query_status.interval_milliseconds = interval_milliseconds; - query_status.next_collect_time = start_time; + QueryMetricLogStatusInfo & info = query_status.info; + info.interval_milliseconds = interval_milliseconds; + info.next_collect_time = start_time; auto context = getContext(); const auto & process_list = context->getProcessList(); - query_status.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { + info.task = context->getSchedulePool().createTask("QueryMetricLog", [this, &process_list, query_id] { collectMetric(process_list, query_id); }); @@ -199,10 +200,10 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, /// that order. { /// Take ownership of the task so that we can destroy it in this scope after unlocking `queries_mutex`. - auto task = std::move(query_status.task); + auto task = std::move(query_status.info.task); /// Build an empty task for the old task to make sure it does not lock any mutex on its destruction. - query_status.task = {}; + query_status.info.task = {}; query_lock.unlock(); global_lock.lock(); @@ -216,18 +217,18 @@ void QueryMetricLog::finishQuery(const String & query_id, TimePoint finish_time, void QueryMetricLogStatus::scheduleNext(String query_id) { - next_collect_time += std::chrono::milliseconds(interval_milliseconds); + info.next_collect_time += std::chrono::milliseconds(info.interval_milliseconds); const auto now = std::chrono::system_clock::now(); - if (next_collect_time > now) + if (info.next_collect_time > now) { - const auto wait_time = std::chrono::duration_cast(next_collect_time - now).count(); - task->scheduleAfter(wait_time); + const auto wait_time = std::chrono::duration_cast(info.next_collect_time - now).count(); + info.task->scheduleAfter(wait_time); } else { LOG_TRACE(logger, "The next collecting task for query {} should have already run at {}. 
Scheduling it right now", - query_id, timePointToString(next_collect_time)); - task->schedule(); + query_id, timePointToString(info.next_collect_time)); + info.task->schedule(); } } @@ -235,25 +236,17 @@ std::optional QueryMetricLogStatus::createLogMetricElemen { /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Collecting query_metric_log for query {} and interval {} ms with QueryStatusInfo from {}. Next collection time: {}", - query_id, interval_milliseconds, timePointToString(query_info_time), - schedule_next ? timePointToString(next_collect_time + std::chrono::milliseconds(interval_milliseconds)) : "finished"); + query_id, info.interval_milliseconds, timePointToString(query_info_time), + schedule_next ? timePointToString(info.next_collect_time + std::chrono::milliseconds(info.interval_milliseconds)) : "finished"); - if (query_info_time <= last_collect_time) + if (query_info_time <= info.last_collect_time) { /// TODO: remove trace before 24.11 release after checking everything is fine on the CI LOG_TRACE(logger, "Query {} has a more recent metrics collected. Skipping this one", query_id); return {}; } - /// Leave some margin because task->scheduleAfter takes a value in milliseconds. - /// So, we can expect up to 1ms of drift since BackgroundSchedulePool will compare - /// time points in milliseconds. - static auto error_margin = std::chrono::milliseconds(1); - if (schedule_next && query_info_time + error_margin < next_collect_time) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Task to collect metric for query {} scheduled at {} but run at {}", - query_id, timePointToString(next_collect_time), timePointToString(query_info_time)); - - last_collect_time = query_info_time; + info.last_collect_time = query_info_time; QueryMetricLogElement elem; elem.event_time = timeInSeconds(query_info_time); @@ -267,7 +260,7 @@ std::optional QueryMetricLogStatus::createLogMetricElemen for (ProfileEvents::Event i = ProfileEvents::Event(0), end = ProfileEvents::end(); i < end; ++i) { const auto & new_value = (*(query_info.profile_counters))[i]; - auto & old_value = last_profile_events[i]; + auto & old_value = info.last_profile_events[i]; /// Profile event counters are supposed to be monotonic. However, at least the `NetworkReceiveBytes` can be inaccurate. /// So, since in the future the counter should always have a bigger value than in the past, we skip this event. 
diff --git a/src/Interpreters/QueryMetricLog.h b/src/Interpreters/QueryMetricLog.h index 65764229b0a..9371dfbb6b5 100644 --- a/src/Interpreters/QueryMetricLog.h +++ b/src/Interpreters/QueryMetricLog.h @@ -41,16 +41,21 @@ struct QueryMetricLogElement void appendToBlock(MutableColumns & columns) const; }; +struct QueryMetricLogStatusInfo +{ + UInt64 interval_milliseconds; + std::chrono::system_clock::time_point last_collect_time; + std::chrono::system_clock::time_point next_collect_time; + std::vector last_profile_events = std::vector(ProfileEvents::end()); + BackgroundSchedulePool::TaskHolder task; +}; + struct QueryMetricLogStatus { using TimePoint = std::chrono::system_clock::time_point; using Mutex = std::mutex; - UInt64 interval_milliseconds; - std::chrono::system_clock::time_point last_collect_time TSA_GUARDED_BY(getMutex()); - std::chrono::system_clock::time_point next_collect_time TSA_GUARDED_BY(getMutex()); - std::vector last_profile_events TSA_GUARDED_BY(getMutex()) = std::vector(ProfileEvents::end()); - BackgroundSchedulePool::TaskHolder task TSA_GUARDED_BY(getMutex()); + QueryMetricLogStatusInfo info TSA_GUARDED_BY(getMutex()); /// We need to be able to move it for the hash map, so we need to add an indirection here. std::unique_ptr mutex = std::make_unique(); From 5c8d89bf13278cf06e93cadcc6db55e974854d5e Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Tue, 12 Nov 2024 11:05:29 +0000 Subject: [PATCH 147/171] Remove unnecessary extern --- src/Interpreters/QueryMetricLog.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/Interpreters/QueryMetricLog.cpp b/src/Interpreters/QueryMetricLog.cpp index bacaf00dc2d..52d773b7d1b 100644 --- a/src/Interpreters/QueryMetricLog.cpp +++ b/src/Interpreters/QueryMetricLog.cpp @@ -24,11 +24,6 @@ namespace DB static auto logger = getLogger("QueryMetricLog"); -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -}; - String timePointToString(QueryMetricLog::TimePoint time) { /// fmtlib supports subsecond formatting in 10.0.0. We're in 9.1.0, so we need to add the milliseconds ourselves. From dde5633c6d1fb248bae5a11634d11c3a3afe8e1e Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Tue, 12 Nov 2024 14:56:37 +0100 Subject: [PATCH 148/171] Fix darwin build --- src/Common/AsynchronousMetrics.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index b60f0f25e74..e8d4b7980f9 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -112,7 +112,12 @@ private: /// Protects saved values. mutable SharedMutex values_mutex; /// Values store the result of the last update prepared for reading. +#ifdef OS_LINUX AsynchronousMetricValues values TSA_GUARDED_BY(values_mutex); +#else + /// When SharedMutex == std::shared_mutex it may not be annotated with the 'capability'. 
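+    /// Readers must still take values_mutex; only the compile-time check is lost on this branch.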
+ AsynchronousMetricValues values; +#endif #if defined(OS_LINUX) || defined(OS_FREEBSD) MemoryStatisticsOS memory_stat TSA_GUARDED_BY(data_mutex); From 410e09b2484e0225687e31166c99c4f9236485b3 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 12 Nov 2024 15:00:12 +0100 Subject: [PATCH 149/171] Add missing fields in system.s3_queue_settings --- src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index 200872a2f4c..6630d5a6c8f 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -747,6 +747,7 @@ ObjectStorageQueueSettings StorageObjectStorageQueue::getSettings() const /// so let's reconstruct. ObjectStorageQueueSettings settings; const auto & table_metadata = getTableMetadata(); + settings[ObjectStorageQueueSetting::mode] = table_metadata.mode; settings[ObjectStorageQueueSetting::after_processing] = table_metadata.after_processing; settings[ObjectStorageQueueSetting::keeper_path] = zk_path; settings[ObjectStorageQueueSetting::loading_retries] = table_metadata.loading_retries; @@ -764,6 +765,7 @@ ObjectStorageQueueSettings StorageObjectStorageQueue::getSettings() const settings[ObjectStorageQueueSetting::max_processed_files_before_commit] = commit_settings.max_processed_files_before_commit; settings[ObjectStorageQueueSetting::max_processed_rows_before_commit] = commit_settings.max_processed_rows_before_commit; settings[ObjectStorageQueueSetting::max_processed_bytes_before_commit] = commit_settings.max_processed_bytes_before_commit; + settings[ObjectStorageQueueSetting::max_processing_time_sec_before_commit] = commit_settings.max_processing_time_sec_before_commit; return settings; } From 36dba0cae4d01cdd3eda231f497d9c5928b05c38 Mon Sep 17 00:00:00 2001 From: Sema Checherinda Date: Tue, 12 Nov 2024 15:29:09 +0100 Subject: [PATCH 150/171] relax memory limit --- tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql index 1d033b7aa47..c840bba6871 100644 --- a/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql +++ b/tests/queries/0_stateless/00755_avg_value_size_hint_passing.sql @@ -5,7 +5,7 @@ SET max_block_size = 1000; SET max_memory_usage = 1000000000; INSERT INTO size_hint SELECT arrayMap(x -> 'Hello', range(1000)) FROM numbers(10000); -SET max_memory_usage = 100000000, max_threads = 2; +SET max_memory_usage = 105000000, max_threads = 2; SELECT count(), sum(length(s)) FROM size_hint; DROP TABLE size_hint; From 43456d74476c99ebdb25ae74a9e6f471421f3191 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:30:57 +0100 Subject: [PATCH 151/171] Restart CI --- .../0_stateless/03262_column_sizes_with_dynamic_structure.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql index 21e6515fc99..099bbd5dd22 100644 --- a/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql +++ b/tests/queries/0_stateless/03262_column_sizes_with_dynamic_structure.sql @@ -3,6 +3,7 @@ set 
allow_experimental_dynamic_type = 1;
 set allow_experimental_json_type = 1;
+
 drop table if exists test;
 create table test (d Dynamic, json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=1;
 insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(10000000);

From e1b6d70c8b8297da457154f04a6e5a3f65709dc4 Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev" 
Date: Sun, 10 Nov 2024 15:27:45 +0100
Subject: [PATCH 152/171] Address point regarding chmod for /etc/clickhouse*

---
 docker/server/Dockerfile.ubuntu | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu
index 0fe9a409ee4..e6bde845c4e 100644
--- a/docker/server/Dockerfile.ubuntu
+++ b/docker/server/Dockerfile.ubuntu
@@ -113,7 +113,9 @@ RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
     /var/lib/apt/lists/* \
     /var/cache/debconf \
     /tmp/* \
-    && apt-get autoremove --purge -yq dirmngr gnupg2
+    && apt-get autoremove --purge -yq dirmngr gnupg2 \
+    && chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
+# The last chmod is here to make the next one a no-op in the docker official library Dockerfile
 
 # post install
 # we need to allow "others" access to clickhouse folder, because docker container

From e513cc96014c6ae62a13dea834b793f3b4b8bd96 Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev" 
Date: Fri, 24 Nov 2023 17:41:22 +0100
Subject: [PATCH 153/171] Add a script to generate docker/official directory

---
 tests/ci/official_docker.py | 153 ++++++++++++++++++++++++++++++++++++
 1 file changed, 153 insertions(+)
 create mode 100644 tests/ci/official_docker.py

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
new file mode 100644
index 00000000000..b7311026d8e
--- /dev/null
+++ b/tests/ci/official_docker.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+"""Plan:
+- Create a PR with regenerated Dockerfiles for each release branch
+- Generate `library definition file` as described in
+https://github.com/docker-library/official-images/blob/master/README.md#library-definition-files
+- Create a PR with it to https://github.com/docker-library/official-images, the file
+name will be `library/clickhouse`"""
+
+import argparse
+import logging
+from pathlib import Path
+from shutil import rmtree
+from typing import Dict, Iterable, List, Set
+
+from git_helper import git_runner
+from version_helper import (
+    ClickHouseVersion,
+    get_supported_versions,
+    get_tagged_versions,
+    get_version_from_string,
+)
+
+
+def _rel_path(arg: str) -> Path:
+    path = Path(arg)
+    if path.is_absolute():
+        raise argparse.ArgumentError(None, message=f"path '{path}' must be relative")
+    return path
+
+
+def parse_args() -> argparse.Namespace:
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description="The script to handle tasks for docker-library/official-images",
+    )
+    parser.add_argument(
+        "--directory",
+        type=_rel_path,
+        default=_rel_path("docker/official"),
+        help="a path relative to the repository root directory",
+    )
+    subparsers = parser.add_subparsers(
+        title="commands", dest="command", help="choose the command to run"
+    )
+    parser_tree = subparsers.add_parser(
+        "generate-tree", help="the command to generate directory `docker/official`"
+    )
+    parser_tree.add_argument(
+        "--min-version",
+        type=get_version_from_string,
+        default=None,
+        help="if not set, only currently supported versions will be used",
+    )
+    parser_tree.add_argument(
+        
"--use-master-docker", + action="store_true", + help="by default, the `docker/server` from each branch is used to generate the " + "directory; if this flag is set, then the `docker/server` from the `master` " + "branch is used", + ) + parser_tree.add_argument( + "--build-images", + action="store_true", + help="if set, the image will be built for each Dockerfile.* in the result dirs", + ) + parser_tree.add_argument("--clean", default=True, help=argparse.SUPPRESS) + parser_tree.add_argument( + "--no-clean", + dest="clean", + action="store_false", + default=argparse.SUPPRESS, + help="if set, the directory `docker/official` won't be cleaned " + "before generating", + ) + args = parser.parse_args() + return args + + +def get_versions_greater(minimal: ClickHouseVersion) -> Set[ClickHouseVersion]: + supported = {} # type: Dict[str, ClickHouseVersion] + versions = get_tagged_versions() + for v in versions: + if v < minimal: + continue + txt = f"{v.major}.{v.minor}" + if txt in supported: + supported[txt] = max(v, supported[txt]) + continue + supported[txt] = v + return set(supported.values()) + + +def create_versions_dirs( + versions: Iterable[ClickHouseVersion], directory: Path, image_type: str = "server" +) -> Dict[ClickHouseVersion, Path]: + assert directory.is_dir() or not directory.exists() + dirs = {} + for v in versions: + version_dir = directory / image_type / str(v) + version_dir.mkdir(parents=True, exist_ok=True) + dirs[v] = version_dir + return dirs + + +def generate_docker_directories( + version_dirs: Dict[ClickHouseVersion, Path], + use_master_docker: bool, + build_images: bool = False, + image_type: str = "server", +) -> None: + for version, directory in version_dirs.items(): + branch = "origin/master" if use_master_docker else version.describe + git_runner( + f"git archive {branch}:docker/{image_type} | " f"tar x -C {directory}" + ) + (directory / "README.md").unlink(missing_ok=True) + for df in directory.glob("Dockerfile.*"): + content = df.read_text().splitlines() + for idx, line in enumerate(content): + if line.startswith("ARG VERSION="): + content[idx] = f'ARG VERSION="{version}"' + df.write_text("\n".join(content) + "\n") + if build_images: + git_runner( + f"docker build --network=host -t '{DOCKER_LIBRARY_NAME[image_type]}:" + f"{version}{df.suffix}' -f '{df}' --progress plain '{directory}'" + ) + + +def generate_tree(args: argparse.Namespace) -> None: + if args.min_version: + versions = get_versions_greater(args.min_version) + else: + versions = get_supported_versions() + directory = Path(git_runner.cwd) / args.directory + if args.clean: + try: + rmtree(directory) + except FileNotFoundError: + pass + version_dirs = create_versions_dirs(versions, directory) + generate_docker_directories(version_dirs, args.use_master_docker, args.build_images) + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + args = parse_args() + if args.command == "generate-tree": + generate_tree(args) + + +if __name__ == "__main__": + main() From 985d19fc5aae84b5c8fc1f9f71dc02151639e1ee Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev"
Date: Tue, 28 Nov 2023 11:12:46 +0100
Subject: [PATCH 154/171] Improve logging, separate keeper/server generation

---
 tests/ci/official_docker.py | 58 ++++++++++++++++++++++++++++++-------
 1 file changed, 48 insertions(+), 10 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index b7311026d8e..1529f69fa8c 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -9,8 +9,9 @@ name will be `library/clickhouse`"""
 import argparse
 import logging
 from pathlib import Path
+from pprint import pformat
 from shutil import rmtree
-from typing import Dict, Iterable, List, Set
+from typing import Dict, Iterable, Set
 
 from git_helper import git_runner
 from version_helper import (
@@ -33,17 +34,30 @@ def parse_args() -> argparse.Namespace:
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description="The script to handle tasks for docker-library/official-images",
     )
+    parser.add_argument(
+        "-v",
+        "--verbose",
+        action="count",
+        default=0,
+        help="set the script verbosity, could be used multiple times",
+    )
     parser.add_argument(
         "--directory",
         type=_rel_path,
         default=_rel_path("docker/official"),
         help="a path relative to the repository root directory",
     )
+    parser.add_argument(
+        "--image-type",
+        choices=["server", "keeper"],
+        default="server",
+        help="which image type to process",
+    )
     subparsers = parser.add_subparsers(
-        title="commands", dest="command", help="choose the command to run"
+        title="commands", dest="command", help="the command to run"
     )
     parser_tree = subparsers.add_parser(
-        "generate-tree", help="the command to generate directory `docker/official`"
+        "generate-tree", help="generates directory `docker/official`"
     )
     parser_tree.add_argument(
         "--min-version",
@@ -91,13 +105,14 @@ def get_versions_greater(minimal: ClickHouseVersion) -> Set[ClickHouseVersion]:
 
 
 def create_versions_dirs(
-    versions: Iterable[ClickHouseVersion], directory: Path, image_type: str = "server"
+    versions: Iterable[ClickHouseVersion], directory: Path
 ) -> Dict[ClickHouseVersion, Path]:
     assert directory.is_dir() or not directory.exists()
     dirs = {}
     for v in versions:
-        version_dir = directory / image_type / str(v)
+        version_dir = directory / str(v)
         version_dir.mkdir(parents=True, exist_ok=True)
+        logging.debug("Directory %s for version %s is created", version_dir, v)
         dirs[v] = version_dir
     return dirs
 
@@ -108,8 +123,15 @@ def generate_docker_directories(
     build_images: bool = False,
     image_type: str = "server",
 ) -> None:
+    arg_version = "ARG VERSION="
     for version, directory in version_dirs.items():
         branch = "origin/master" if use_master_docker else version.describe
+        logging.debug(
+            "Checkout directory content from '%s:docker/%s' to %s",
+            branch,
+            image_type,
+            directory,
+        )
         git_runner(
             f"git archive {branch}:docker/{image_type} | " f"tar x -C {directory}"
         )
@@ -117,8 +139,15 @@ def generate_docker_directories(
         for df in directory.glob("Dockerfile.*"):
             content = df.read_text().splitlines()
             for idx, line in enumerate(content):
-                if line.startswith("ARG VERSION="):
-                    content[idx] = f'ARG VERSION="{version}"'
+                if line.startswith(arg_version):
+                    logging.debug(
+                        "Found '%s' in line %s:%s, setting to version %s",
+                        arg_version,
+                        df.name,
+                        idx,
+                        version,
+                    )
+                    content[idx] = f'{arg_version}"{version}"'
             df.write_text("\n".join(content) + "\n")
         if build_images:
             git_runner(
@@ -132,19 +161,28 @@ def generate_tree(args: argparse.Namespace) -> None:
         versions = get_versions_greater(args.min_version)
     else:
         versions = get_supported_versions()
-    directory = 
Path(git_runner.cwd) / args.directory + logging.info( + "The versions to generate:\n %s", + "\n ".join(v.string for v in sorted(versions)), + ) + directory = Path(git_runner.cwd) / args.directory / args.image_type if args.clean: try: + logging.info("Removing directory %s before generating", directory) rmtree(directory) except FileNotFoundError: pass version_dirs = create_versions_dirs(versions, directory) - generate_docker_directories(version_dirs, args.use_master_docker, args.build_images) + generate_docker_directories( + version_dirs, args.use_master_docker, args.build_images, args.image_type + ) def main() -> None: - logging.basicConfig(level=logging.INFO) args = parse_args() + log_levels = [logging.CRITICAL, logging.WARN, logging.INFO, logging.DEBUG] + logging.basicConfig(level=log_levels[min(args.verbose, 3)]) + logging.debug("Arguments are %s", pformat(args.__dict__)) if args.command == "generate-tree": generate_tree(args) From 5ef2104ae1d5e3764769f6cdc7af08e10a0c815c Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Mon, 4 Dec 2023 12:05:54 +0100 Subject: [PATCH 155/171] Add options to fetch tags and commit changes --- tests/ci/official_docker.py | 65 +++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 3 deletions(-) diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py index 1529f69fa8c..48e4c3c1808 100644 --- a/tests/ci/official_docker.py +++ b/tests/ci/official_docker.py @@ -8,9 +8,13 @@ name will be `library/clickhouse`""" import argparse import logging +from dataclasses import dataclass +from os import getpid from pathlib import Path from pprint import pformat +from shlex import quote from shutil import rmtree +from textwrap import fill from typing import Dict, Iterable, Set from git_helper import git_runner @@ -53,8 +57,14 @@ def parse_args() -> argparse.Namespace: default="server", help="which image type to process", ) + parser.add_argument( + "--commit", + action="store_true", + help="if set, the directory `docker/official` will be staged and committed " + "after generating", + ) subparsers = parser.add_subparsers( - title="commands", dest="command", help="the command to run" + title="commands", dest="command", required=True, help="the command to run" ) parser_tree = subparsers.add_parser( "generate-tree", help="generates directory `docker/official`" @@ -69,8 +79,8 @@ def parse_args() -> argparse.Namespace: "--use-master-docker", action="store_true", help="by default, the `docker/server` from each branch is used to generate the " - "directory; if this flag is set, then the `docker/server` from the `master` " - "branch is used", + "directory; if this flag is set, then the `docker/server` from the " + "`origin/master` branch is used", ) parser_tree.add_argument( "--build-images", @@ -86,6 +96,11 @@ def parse_args() -> argparse.Namespace: help="if set, the directory `docker/official` won't be cleaned " "before generating", ) + parser_tree.add_argument( + "--fetch-tags", + action="store_true", + help="if set, the tags will be updated before run", + ) args = parser.parse_args() return args @@ -156,7 +171,36 @@ def generate_docker_directories( ) +def path_is_changed(path: Path) -> bool: + logging.info("Checking if the path %s is changed", path) + return bool(git_runner(f"git status --porcelain -- '{path}'")) + + +def get_cmdline(width: int = 80) -> str: + cmdline = " ".join( + quote(arg) + for arg in Path(f"/proc/{getpid()}/cmdline") + .read_text(encoding="utf-8") + .split("\x00")[:-1] + ) + if width <= 2: + return cmdline + + return " 
\\\n".join( + fill( + cmdline, + break_long_words=False, + break_on_hyphens=False, + subsequent_indent=r" ", + width=width - 2, + ).split("\n") + ) + + def generate_tree(args: argparse.Namespace) -> None: + if args.fetch_tags: + # Fetch all tags to not miss the latest versions + git_runner("git fetch --tags --no-recurse-submodules") if args.min_version: versions = get_versions_greater(args.min_version) else: @@ -176,6 +220,21 @@ def generate_tree(args: argparse.Namespace) -> None: generate_docker_directories( version_dirs, args.use_master_docker, args.build_images, args.image_type ) + if args.commit and path_is_changed(directory): + logging.info("Staging and committing content of %s", directory) + git_runner(f"git add {directory}") + commit_message = "\n".join( + ( + "Re-/Generated tags for official docker library", + "", + "The changed versions:", + *(f" {v.string}" for v in sorted(versions)), + f"The old images were removed: {args.clean}", + "The directory is generated and committed as following:", + f"{get_cmdline()}", + ) + ) + git_runner("git commit -F -", input=commit_message) def main() -> None: From 81a3ca147a8454cee98f13e13bb0a2b5152eb5d7 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Mon, 4 Dec 2023 12:08:40 +0100 Subject: [PATCH 156/171] Add command to generate docker LDF --- tests/ci/official_docker.py | 123 +++++++++++++++++++++++++++++++++++- 1 file changed, 121 insertions(+), 2 deletions(-) diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py index 48e4c3c1808..f4dc707d0ad 100644 --- a/tests/ci/official_docker.py +++ b/tests/ci/official_docker.py @@ -15,9 +15,10 @@ from pprint import pformat from shlex import quote from shutil import rmtree from textwrap import fill -from typing import Dict, Iterable, Set +from typing import Dict, Iterable, List, Optional, Set -from git_helper import git_runner +from env_helper import GITHUB_REPOSITORY +from git_helper import Git, git_runner from version_helper import ( ClickHouseVersion, get_supported_versions, @@ -25,6 +26,14 @@ from version_helper import ( get_version_from_string, ) +UBUNTU_NAMES = { + "20.04": "focal", + "22.04": "jammy", +} + + +DOCKER_LIBRARY_NAME = {"server": "clickhouse"} + def _rel_path(arg: str) -> Path: path = Path(arg) @@ -101,6 +110,9 @@ def parse_args() -> argparse.Namespace: action="store_true", help="if set, the tags will be updated before run", ) + subparsers.add_parser( + "generate-ldf", help="generate docker library definition file" + ) args = parser.parse_args() return args @@ -237,6 +249,111 @@ def generate_tree(args: argparse.Namespace) -> None: git_runner("git commit -F -", input=commit_message) +@dataclass +class TagAttrs: + """the metadata to preserve between generating tags for different versions""" + + # Only one latest can exist + latest: ClickHouseVersion + # Only one can be a major one (the most fresh per a year) + majors: Dict[int, ClickHouseVersion] + # Only one lts version can exist + lts: Optional[ClickHouseVersion] + + +def ldf_header(git: Git, directory: Path) -> List[str]: + script_path = Path(__file__).relative_to(git.root) + script_sha = git_runner(f"git log -1 --format=format:%H -- {script_path}") + repo = f"https://github.com/{GITHUB_REPOSITORY}" + fetch_commit = git_runner(f"git log -1 --format=format:%H -- {directory}") + return [ + f"# The file is generated by {repo}/blob/{script_sha}/{script_path}", + "", + "Maintainers: Misha f. 
Shiryaev (@Felixoid),",
+        "             Max Kainov (@mkaynov),",
+        "             Alexander Sapin (@alesapin)",
+        f"GitRepo: {repo}.git",
+        f"GitFetch: refs/heads/{git.branch}",
+        f"GitCommit: {fetch_commit}",
+    ]
+
+
+def ldf_tags(version: ClickHouseVersion, distro: str, tag_attrs: TagAttrs) -> str:
+    """returns the string 'Tags: comma, separated, tags'"""
+    tags = []
+    without_distro = distro in UBUNTU_NAMES.values()
+    with_lts = tag_attrs.lts in (None, version) and version.is_lts
+    if version == tag_attrs.latest:
+        if without_distro:
+            tags.append("latest")
+        tags.append(distro)
+
+    if with_lts:
+        tag_attrs.lts = version
+        if without_distro:
+            tags.append("lts")
+        tags.append(f"{distro}-lts")
+
+    with_major = tag_attrs.majors.get(version.major) in (None, version)
+    if with_major:
+        tag_attrs.majors[version.major] = version
+        if without_distro:
+            tags.append(f"{version.major}")
+        tags.append(f"{version.major}-{distro}")
+
+    for tag in (
+        f"{version.major}.{version.minor}",
+        f"{version.major}.{version.minor}.{version.patch}",
+        f"{version.major}.{version.minor}.{version.patch}.{version.tweak}",
+    ):
+        if without_distro:
+            tags.append(tag)
+        tags.append(f"{tag}-{distro}")
+
+    return f"Tags: {', '.join(tags)}"
+
+
+def generate_ldf(args: argparse.Namespace) -> None:
+    "collect all Dockerfile.*"
+    directory = Path(git_runner.cwd) / args.directory / args.image_type
+    versions = sorted([get_version_from_string(d.name) for d in directory.iterdir()])
+    assert versions
+    assert not path_is_changed(directory)
+    git = Git(True)
+    lines = ldf_header(git, directory)
+    tag_attrs = TagAttrs(versions[-1], {}, None)
+    for version in reversed(versions):
+        tag_dir = directory / str(version)
+        for file in tag_dir.glob("Dockerfile.*"):
+            lines.append("")
+            distro = file.suffix[1:]
+            if distro == "ubuntu":
+                with open(file, "r", encoding="utf-8") as fd:
+                    for l in fd:
+                        if l.startswith("FROM ubuntu:"):
+                            ubuntu_version = l.split(":")[-1].strip()
+                            distro = UBUNTU_NAMES[ubuntu_version]
+                            break
+            lines.append(ldf_tags(version, distro, tag_attrs))
+            lines.append("Architectures: amd64, arm64v8")
+            lines.append(f"Directory: {tag_dir.relative_to(git.root)}")
+            lines.append(f"File: {file.name}")
+
+    lines.append("")
+    ldf_file = (
+        Path(git_runner.cwd) / args.directory / DOCKER_LIBRARY_NAME[args.image_type]
+    )
+    ldf_file.write_text("\n".join(lines))
+    if args.commit and path_is_changed(ldf_file):
+        git_runner(f"git add {ldf_file}")
+        commit_message = (
+            f"Re-/Generated docker LDF for {args.image_type} image\n\n"
+            f"The file is generated and committed as following:\n{get_cmdline()}"
+        )
+        git_runner("git commit -F -", input=commit_message)
+    print("\n".join(lines))
+
+
 def main() -> None:
     args = parse_args()
     log_levels = [logging.CRITICAL, logging.WARN, logging.INFO, logging.DEBUG]
@@ -244,6 +361,8 @@ def main() -> None:
     logging.debug("Arguments are %s", pformat(args.__dict__))
     if args.command == "generate-tree":
         generate_tree(args)
+    elif args.command == "generate-ldf":
+        generate_ldf(args)
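For reference, the generate-ldf command added above writes a plain-text library definition file; one header plus one stanza of its output would look roughly as follows. All concrete values here (version numbers, branch, and the tag fan-out for a hypothetical latest LTS release 24.3.2.1) are illustrative placeholders derived from the code above, not captured output:

    # The file is generated by https://github.com/ClickHouse/ClickHouse/blob/<script-sha>/tests/ci/official_docker.py

    Maintainers: Misha f. Shiryaev (@Felixoid),
                 Max Kainov (@mkaynov),
                 Alexander Sapin (@alesapin)
    GitRepo: https://github.com/ClickHouse/ClickHouse.git
    GitFetch: refs/heads/master
    GitCommit: <fetch-commit-sha>

    Tags: latest, jammy, lts, jammy-lts, 24, 24-jammy, 24.3, 24.3-jammy, 24.3.2, 24.3.2-jammy, 24.3.2.1, 24.3.2.1-jammy
    Architectures: amd64, arm64v8
    Directory: docker/official/server/24.3.2.1
    File: Dockerfile.ubuntu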
From 61030eb43c0bd33c563fd42f0a82fac359e4d4b7 Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Wed, 6 Dec 2023 15:52:21 +0100
Subject: [PATCH 157/171] Fix the help for subcommands

---
 tests/ci/official_docker.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index f4dc707d0ad..9c703339e77 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -47,26 +47,29 @@ def parse_args() -> argparse.Namespace:
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description="The script to handle tasks for docker-library/official-images",
     )
-    parser.add_argument(
+    global_args = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False
+    )
+    global_args.add_argument(
         "-v",
         "--verbose",
         action="count",
         default=0,
         help="set the script verbosity, could be used multiple times",
     )
-    parser.add_argument(
+    global_args.add_argument(
         "--directory",
         type=_rel_path,
         default=_rel_path("docker/official"),
         help="a path relative to the repository root directory",
     )
-    parser.add_argument(
+    global_args.add_argument(
         "--image-type",
         choices=["server", "keeper"],
         default="server",
         help="which image type to process",
     )
-    parser.add_argument(
+    global_args.add_argument(
         "--commit",
         action="store_true",
         help="if set, the directory `docker/official` will be staged and committed "
@@ -76,7 +79,9 @@ def parse_args() -> argparse.Namespace:
         title="commands", dest="command", required=True, help="the command to run"
     )
     parser_tree = subparsers.add_parser(
-        "generate-tree", help="generates directory `docker/official`"
+        "generate-tree",
+        help="generates directory `docker/official`",
+        parents=[global_args],
     )
     parser_tree.add_argument(
         "--min-version",
@@ -111,7 +116,9 @@ def parse_args() -> argparse.Namespace:
         help="if set, the tags will be updated before run",
     )
     subparsers.add_parser(
-        "generate-ldf", help="generate docker library definition file"
+        "generate-ldf",
+        help="generate docker library definition file",
+        parents=[global_args],
     )
     args = parser.parse_args()
     return args

From 27b544e47e5a86b4e517f9de99101df51577437f Mon Sep 17 00:00:00 2001
From: "Mikhail f. 
Shiryaev" Date: Wed, 6 Dec 2023 16:57:00 +0100 Subject: [PATCH 158/171] Improve branch for docker usability --- tests/ci/official_docker.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py index 9c703339e77..8cae0921760 100644 --- a/tests/ci/official_docker.py +++ b/tests/ci/official_docker.py @@ -47,9 +47,7 @@ def parse_args() -> argparse.Namespace: formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="The script to handle tasks for docker-library/official-images", ) - global_args = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False - ) + global_args = argparse.ArgumentParser(add_help=False) global_args.add_argument( "-v", "--verbose", @@ -80,6 +78,7 @@ def parse_args() -> argparse.Namespace: ) parser_tree = subparsers.add_parser( "generate-tree", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, help="generates directory `docker/official`", parents=[global_args], ) @@ -89,12 +88,18 @@ def parse_args() -> argparse.Namespace: default=None, help="if not set, only currently supported versions will be used", ) + docker_branch = "origin/master" parser_tree.add_argument( - "--use-master-docker", + "--use-docker-from-branch", action="store_true", help="by default, the `docker/server` from each branch is used to generate the " "directory; if this flag is set, then the `docker/server` from the " - "`origin/master` branch is used", + "`--docker-branch` value is used", + ) + parser_tree.add_argument( + "--docker-branch", + default=docker_branch, + help="the branch to get the content of `docker/server` directory", ) parser_tree.add_argument( "--build-images", @@ -117,6 +122,7 @@ def parse_args() -> argparse.Namespace: ) subparsers.add_parser( "generate-ldf", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, help="generate docker library definition file", parents=[global_args], ) @@ -153,13 +159,14 @@ def create_versions_dirs( def generate_docker_directories( version_dirs: Dict[ClickHouseVersion, Path], - use_master_docker: bool, + use_docker_from_branch: bool, + docker_branch: str, build_images: bool = False, image_type: str = "server", ) -> None: arg_version = "ARG VERSION=" for version, directory in version_dirs.items(): - branch = "origin/master" if use_master_docker else version.describe + branch = docker_branch if use_docker_from_branch else version.describe logging.debug( "Checkout directory content from '%s:docker/%s' to %s", branch, @@ -237,7 +244,11 @@ def generate_tree(args: argparse.Namespace) -> None: pass version_dirs = create_versions_dirs(versions, directory) generate_docker_directories( - version_dirs, args.use_master_docker, args.build_images, args.image_type + version_dirs, + args.use_docker_from_branch, + args.docker_branch, + args.build_images, + args.image_type, ) if args.commit and path_is_changed(directory): logging.info("Staging and committing content of %s", directory) From 259baa1f498004657c6da0fa6fb0a9a18ec659b0 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev"
Date: Fri, 8 Dec 2023 12:07:13 +0100
Subject: [PATCH 159/171] Add argument to skip checking the changed docker/official tree

---
 tests/ci/official_docker.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index 8cae0921760..493eca3ca3c 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -120,12 +120,21 @@ def parse_args() -> argparse.Namespace:
         action="store_true",
         help="if set, the tags will be updated before run",
     )
-    subparsers.add_parser(
+    parser_ldf = subparsers.add_parser(
         "generate-ldf",
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         help="generate docker library definition file",
         parents=[global_args],
     )
+    parser_ldf.add_argument("--check-changed", default=True, help=argparse.SUPPRESS)
+    parser_ldf.add_argument(
+        "--no-check-changed",
+        dest="check_changed",
+        action="store_false",
+        default=argparse.SUPPRESS,
+        help="if set, the directory `docker/official` won't be checked for "
+        "uncommitted changes",
+    )
     args = parser.parse_args()
     return args
@@ -336,7 +345,8 @@ def generate_ldf(args: argparse.Namespace) -> None:
     directory = Path(git_runner.cwd) / args.directory / args.image_type
     versions = sorted([get_version_from_string(d.name) for d in directory.iterdir()])
     assert versions
-    assert not path_is_changed(directory)
+    if args.check_changed:
+        assert not path_is_changed(directory)
     git = Git(True)
     lines = ldf_header(git, directory)
     tag_attrs = TagAttrs(versions[-1], {}, None)

From 7b65bebe2d7fc0dec27c6845e9167208d1efa282 Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Mon, 6 May 2024 16:06:07 +0200
Subject: [PATCH 160/171] Filter out the CI-related stuff from official docker

---
 tests/ci/official_docker.py | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index 493eca3ca3c..6b9108ef871 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -174,6 +174,8 @@ def generate_docker_directories(
     image_type: str = "server",
 ) -> None:
     arg_version = "ARG VERSION="
+    start_filter = "#docker-official-library:off"
+    stop_filter = "#docker-official-library:on"
     for version, directory in version_dirs.items():
         branch = docker_branch if use_docker_from_branch else version.describe
         logging.debug(
@@ -187,17 +189,31 @@ def generate_docker_directories(
         )
         (directory / "README.md").unlink(missing_ok=True)
         for df in directory.glob("Dockerfile.*"):
-            content = df.read_text().splitlines()
-            for idx, line in enumerate(content):
+            original_content = df.read_text().splitlines()
+            content = []
+            filtering = False
+            for line in original_content:
+                # Change the ARG VERSION= to a proper version
                 if line.startswith(arg_version):
                     logging.debug(
                         "Found '%s' in line %s:%s, setting to version %s",
                         arg_version,
                         df.name,
-                        idx,
+                        original_content.index(line),
                         version,
                     )
-                    content[idx] = f'{arg_version}"{version}"'
+                    content.append(f'{arg_version}"{version}"')
+                    continue
+                # Filter out CI-related part from official docker
+                if line == start_filter:
+                    filtering = True
+                    continue
+                if line == stop_filter:
+                    filtering = False
+                    continue
+                if not filtering:
+                    content.append(line)
+
             df.write_text("\n".join(content) + "\n")
         if build_images:
             git_runner(
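To make the new filtering concrete: a Dockerfile carrying a CI-only fragment between the markers would be laid out roughly like this, and everything between the off/on markers is dropped from the generated official image. Only the marker comments and the ARG VERSION handling come from the patch above; the RUN line is an invented placeholder:

    ARG VERSION="24.3.2.1"
    #docker-official-library:off
    RUN echo "CI-only step, excluded from docker-library images"
    #docker-official-library:on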
From c05c72cb8074b825619c13f5a15ed45d66f7c62a Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Mon, 6 May 2024 17:06:49 +0200
Subject: [PATCH 161/171] Set the `Dockerfile.*` as an argument

---
 tests/ci/official_docker.py | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index 6b9108ef871..e45d9d941dd 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -73,6 +73,12 @@ def parse_args() -> argparse.Namespace:
         help="if set, the directory `docker/official` will be staged and committed "
         "after generating",
     )
+    dockerfile_glob = "Dockerfile.*"
+    global_args.add_argument(
+        "--dockerfile-glob",
+        default=dockerfile_glob,
+        help="a glob to collect Dockerfiles in the server or keeper directory",
+    )
     subparsers = parser.add_subparsers(
         title="commands", dest="command", required=True, help="the command to run"
     )
@@ -88,23 +94,17 @@ def parse_args() -> argparse.Namespace:
         default=None,
         help="if not set, only currently supported versions will be used",
    )
-    docker_branch = "origin/master"
-    parser_tree.add_argument(
-        "--use-docker-from-branch",
-        action="store_true",
-        help="by default, the `docker/server` from each branch is used to generate the "
-        "directory; if this flag is set, then the `docker/server` from the "
-        "`--docker-branch` value is used",
-    )
     parser_tree.add_argument(
         "--docker-branch",
-        default=docker_branch,
-        help="the branch to get the content of `docker/server` directory",
+        default="",
+        help="if set, the branch to get the content of `docker/server` directory. When "
+        "unset, the content of directories is taken from release tags",
     )
     parser_tree.add_argument(
         "--build-images",
         action="store_true",
-        help="if set, the image will be built for each Dockerfile.* in the result dirs",
+        help="if set, the image will be built for each Dockerfile '--dockerfile-glob' "
+        "in the result dirs",
     )
     parser_tree.add_argument("--clean", default=True, help=argparse.SUPPRESS)
     parser_tree.add_argument(
@@ -168,8 +168,8 @@ def create_versions_dirs(
 
 def generate_docker_directories(
     version_dirs: Dict[ClickHouseVersion, Path],
-    use_docker_from_branch: bool,
     docker_branch: str,
+    dockerfile_glob: str,
     build_images: bool = False,
     image_type: str = "server",
 ) -> None:
@@ -177,7 +177,7 @@ def generate_docker_directories(
     start_filter = "#docker-official-library:off"
     stop_filter = "#docker-official-library:on"
     for version, directory in version_dirs.items():
-        branch = docker_branch if use_docker_from_branch else version.describe
+        branch = docker_branch or version.describe
         logging.debug(
             "Checkout directory content from '%s:docker/%s' to %s",
             branch,
@@ -188,7 +188,7 @@ def generate_docker_directories(
             f"git archive {branch}:docker/{image_type} | " f"tar x -C {directory}"
         )
         (directory / "README.md").unlink(missing_ok=True)
-        for df in directory.glob("Dockerfile.*"):
+        for df in directory.glob(dockerfile_glob):
             original_content = df.read_text().splitlines()
             content = []
             filtering = False
@@ -270,8 +270,8 @@ def generate_tree(args: argparse.Namespace) -> None:
     version_dirs = create_versions_dirs(versions, directory)
     generate_docker_directories(
         version_dirs,
-        args.use_docker_from_branch,
         args.docker_branch,
+        args.dockerfile_glob,
         args.build_images,
         args.image_type,
     )
@@ -368,7 +368,7 @@ def generate_ldf(args: argparse.Namespace) -> None:
     tag_attrs = TagAttrs(versions[-1], {}, None)
     for version in reversed(versions):
         tag_dir = directory / str(version)
-        for file in tag_dir.glob("Dockerfile.*"):
+        for file in tag_dir.glob(args.dockerfile_glob):
lines.append("") distro = file.suffix[1:] if distro == "ubuntu": From dcd0d1acac3f48244c2c7d0cad82bb5a78a93d69 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 14:15:54 +0100 Subject: [PATCH 162/171] Support a dedicated repository for DOL sources --- tests/ci/official_docker.py | 39 +++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py index e45d9d941dd..5bdd317cd1e 100644 --- a/tests/ci/official_docker.py +++ b/tests/ci/official_docker.py @@ -17,10 +17,11 @@ from shutil import rmtree from textwrap import fill from typing import Dict, Iterable, List, Optional, Set -from env_helper import GITHUB_REPOSITORY -from git_helper import Git, git_runner +from env_helper import GITHUB_REPOSITORY, IS_CI +from git_helper import GIT_PREFIX, Git, git_runner from version_helper import ( ClickHouseVersion, + VersionType, get_supported_versions, get_tagged_versions, get_version_from_string, @@ -31,6 +32,10 @@ UBUNTU_NAMES = { "22.04": "jammy", } +if not IS_CI: + GIT_PREFIX = "git" + +DOCKER_LIBRARY_REPOSITORY = "ClickHouse/docker-library" DOCKER_LIBRARY_NAME = {"server": "clickhouse"} @@ -224,7 +229,8 @@ def generate_docker_directories( def path_is_changed(path: Path) -> bool: logging.info("Checking if the path %s is changed", path) - return bool(git_runner(f"git status --porcelain -- '{path}'")) + path_dir = path.parent + return bool(git_runner(f"git -C {path_dir} status --porcelain -- '{path}'")) def get_cmdline(width: int = 80) -> str: @@ -251,7 +257,7 @@ def get_cmdline(width: int = 80) -> str: def generate_tree(args: argparse.Namespace) -> None: if args.fetch_tags: # Fetch all tags to not miss the latest versions - git_runner("git fetch --tags --no-recurse-submodules") + git_runner(f"{GIT_PREFIX} fetch --tags --no-recurse-submodules") if args.min_version: versions = get_versions_greater(args.min_version) else: @@ -277,7 +283,7 @@ def generate_tree(args: argparse.Namespace) -> None: ) if args.commit and path_is_changed(directory): logging.info("Staging and committing content of %s", directory) - git_runner(f"git add {directory}") + git_runner(f"git -C {directory} add {directory}") commit_message = "\n".join( ( "Re-/Generated tags for official docker library", @@ -289,7 +295,7 @@ def generate_tree(args: argparse.Namespace) -> None: f"{get_cmdline()}", ) ) - git_runner("git commit -F -", input=commit_message) + git_runner(f"{GIT_PREFIX} -C {directory} commit -F -", input=commit_message) @dataclass @@ -308,15 +314,19 @@ def ldf_header(git: Git, directory: Path) -> List[str]: script_path = Path(__file__).relative_to(git.root) script_sha = git_runner(f"git log -1 --format=format:%H -- {script_path}") repo = f"https://github.com/{GITHUB_REPOSITORY}" - fetch_commit = git_runner(f"git log -1 --format=format:%H -- {directory}") + dl_repo = f"https://github.com/{DOCKER_LIBRARY_REPOSITORY}" + fetch_commit = git_runner( + f"git -C {directory} log -1 --format=format:%H -- {directory}" + ) + dl_branch = git_runner(f"git -C {directory} branch --show-current") return [ f"# The file is generated by {repo}/blob/{script_sha}/{script_path}", "", "Maintainers: Misha f. 
Shiryaev (@Felixoid),", " Max Kainov (@mkaynov),", " Alexander Sapin (@alesapin)", - f"GitRepo: {repo}.git", - f"GitFetch: refs/heads/{git.branch}", + f"GitRepo: {dl_repo}.git", + f"GitFetch: refs/heads/{dl_branch}", f"GitCommit: {fetch_commit}", ] @@ -364,6 +374,10 @@ def generate_ldf(args: argparse.Namespace) -> None: if args.check_changed: assert not path_is_changed(directory) git = Git(True) + # Support a few repositories + dir_git_root = ( + args.directory / git_runner(f"git -C {args.directory} rev-parse --show-cdup") + ).absolute() lines = ldf_header(git, directory) tag_attrs = TagAttrs(versions[-1], {}, None) for version in reversed(versions): @@ -380,7 +394,7 @@ def generate_ldf(args: argparse.Namespace) -> None: break lines.append(ldf_tags(version, distro, tag_attrs)) lines.append("Architectures: amd64, arm64v8") - lines.append(f"Directory: {tag_dir.relative_to(git.root)}") + lines.append(f"Directory: {tag_dir.relative_to(dir_git_root)}") lines.append(f"File: {file.name}") lines.append("") @@ -389,12 +403,13 @@ def generate_ldf(args: argparse.Namespace) -> None: ) ldf_file.write_text("\n".join(lines)) if args.commit and path_is_changed(ldf_file): - git_runner(f"git add {ldf_file}") + ldf_dir = ldf_file.parent + git_runner(f"git -C {ldf_dir} add {ldf_file}") commit_message = ( f"Re-/Generated docker LDF for {args.image_type} image\n\n" f"The file is generated and committed as following:\n{get_cmdline()}" ) - git_runner("git commit -F -", input=commit_message) + git_runner(f"{GIT_PREFIX} -C {ldf_dir} commit -F -", input=commit_message) print("\n".join(lines)) From 20717d1bbe6a234cf823a8f014f8348efcb789bd Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Thu, 7 Nov 2024 14:34:31 +0100 Subject: [PATCH 163/171] ClickHouseVersion.is_supported property, create images for them exclusively --- tests/ci/official_docker.py | 3 +-- tests/ci/version_helper.py | 7 ++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py index 5bdd317cd1e..ad1f6a6d6d2 100644 --- a/tests/ci/official_docker.py +++ b/tests/ci/official_docker.py @@ -21,7 +21,6 @@ from env_helper import GITHUB_REPOSITORY, IS_CI from git_helper import GIT_PREFIX, Git, git_runner from version_helper import ( ClickHouseVersion, - VersionType, get_supported_versions, get_tagged_versions, get_version_from_string, @@ -148,7 +147,7 @@ def get_versions_greater(minimal: ClickHouseVersion) -> Set[ClickHouseVersion]: supported = {} # type: Dict[str, ClickHouseVersion] versions = get_tagged_versions() for v in versions: - if v < minimal: + if v < minimal or not v.is_supported: continue txt = f"{v.major}.{v.minor}" if txt in supported: diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index bf48f3f53dd..0fce2f920d1 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -164,6 +164,11 @@ class ClickHouseVersion: """our X.3 and X.8 are LTS""" return self.minor % 5 == 3 + @property + def is_supported(self) -> bool: + "we can support only versions with VersionType STABLE or LTS" + return self.description in (VersionType.STABLE, VersionType.LTS) + def get_stable_release_type(self) -> str: if self.is_lts: return VersionType.LTS @@ -365,7 +370,7 @@ def get_supported_versions( versions = list(versions) else: # checks that repo is not shallow in background - versions = get_tagged_versions() + versions = [v for v in get_tagged_versions() if v.is_supported] versions.sort() versions.reverse() for version in versions: From 
687564bee830e4a385285a8f50a9be499e0af443 Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Fri, 8 Nov 2024 11:23:05 +0100
Subject: [PATCH 164/171] Ignore README and its sources on unpacking

---
 tests/ci/official_docker.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index ad1f6a6d6d2..beb22870ab3 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -189,9 +189,9 @@ def generate_docker_directories(
             directory,
         )
         git_runner(
-            f"git archive {branch}:docker/{image_type} | " f"tar x -C {directory}"
+            f"git archive {branch}:docker/{image_type} | "
+            f"tar --exclude='README*' -x -C {directory}"
         )
-        (directory / "README.md").unlink(missing_ok=True)
         for df in directory.glob(dockerfile_glob):
             original_content = df.read_text().splitlines()
             content = []

From e698bbeb751789d25a82d00b1e859223276e801b Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Sun, 10 Nov 2024 20:26:31 +0100
Subject: [PATCH 165/171] Improve script usability

---
 tests/ci/official_docker.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index beb22870ab3..ec85ca2ce33 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -401,7 +401,9 @@ def generate_ldf(args: argparse.Namespace) -> None:
         Path(git_runner.cwd) / args.directory / DOCKER_LIBRARY_NAME[args.image_type]
     )
     ldf_file.write_text("\n".join(lines))
+    logging.info("The content of LDF file:\n%s", "\n".join(lines))
     if args.commit and path_is_changed(ldf_file):
+        logging.info("Starting committing of %s file", ldf_file)
         ldf_dir = ldf_file.parent
         git_runner(f"git -C {ldf_dir} add {ldf_file}")
         commit_message = (
@@ -409,7 +411,6 @@ def generate_ldf(args: argparse.Namespace) -> None:
             f"The file is generated and committed as following:\n{get_cmdline()}"
         )
         git_runner(f"{GIT_PREFIX} -C {ldf_dir} commit -F -", input=commit_message)
-        print("\n".join(lines))
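With the usability fixes in place, a plausible end-to-end invocation of the script would look like the sketch below. The flags are the ones defined in parse_args above; note that the global options follow the subcommand, because they are attached to each subparser via parents=[global_args]:

    python3 tests/ci/official_docker.py generate-tree --image-type server --fetch-tags --commit
    python3 tests/ci/official_docker.py generate-ldf --image-type server --commit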
From e1b84f1f01fb9458ec7ac60c9883286fbca3af2e Mon Sep 17 00:00:00 2001
From: "Mikhail f. Shiryaev"
Date: Tue, 12 Nov 2024 16:46:45 +0100
Subject: [PATCH 166/171] Add more comments and function-docs

---
 tests/ci/official_docker.py | 52 ++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 21 deletions(-)

diff --git a/tests/ci/official_docker.py b/tests/ci/official_docker.py
index ec85ca2ce33..f62002ee1d2 100644
--- a/tests/ci/official_docker.py
+++ b/tests/ci/official_docker.py
@@ -39,13 +39,6 @@ DOCKER_LIBRARY_REPOSITORY = "ClickHouse/docker-library"
 DOCKER_LIBRARY_NAME = {"server": "clickhouse"}
 
 
-def _rel_path(arg: str) -> Path:
-    path = Path(arg)
-    if path.is_absolute():
-        raise argparse.ArgumentError(None, message=f"path '{path}' must be relative")
-    return path
-
-
 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser(
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description="The script to handle tasks for docker-library/official-images",
@@ -61,8 +54,8 @@ def parse_args() -> argparse.Namespace:
     )
     global_args.add_argument(
         "--directory",
-        type=_rel_path,
-        default=_rel_path("docker/official"),
+        type=Path,
+        default=Path("docker/official"),
         help="a path relative to the repository root directory",
     )
     global_args.add_argument(
@@ -144,6 +137,7 @@ def parse_args() -> argparse.Namespace:
 
 
 def get_versions_greater(minimal: ClickHouseVersion) -> Set[ClickHouseVersion]:
+    "Get the latest patch version for each major.minor >= minimal"
     supported = {}  # type: Dict[str, ClickHouseVersion]
     versions = get_tagged_versions()
     for v in versions:
@@ -182,15 +176,15 @@ def generate_docker_directories(
     stop_filter = "#docker-official-library:on"
     for version, directory in version_dirs.items():
         branch = docker_branch or version.describe
+        docker_source = f"{branch}:docker/{image_type}"
         logging.debug(
-            "Checkout directory content from '%s:docker/%s' to %s",
-            branch,
-            image_type,
+            "Unpack directory content from '%s' to %s",
+            docker_source,
             directory,
         )
+        # We ignore README* files
         git_runner(
-            f"git archive {branch}:docker/{image_type} | "
-            f"tar --exclude='README*' -x -C {directory}"
+            f"git archive {docker_source} | tar --exclude='README*' -x -C {directory}"
         )
         for df in directory.glob(dockerfile_glob):
             original_content = df.read_text().splitlines()
@@ -227,12 +221,14 @@ def generate_docker_directories(
 
 
 def path_is_changed(path: Path) -> bool:
+    "checks if `path` has uncommitted changes"
     logging.info("Checking if the path %s is changed", path)
     path_dir = path.parent
     return bool(git_runner(f"git -C {path_dir} status --porcelain -- '{path}'"))
 
 
 def get_cmdline(width: int = 80) -> str:
+    "Returns the cmdline split by words with given maximum width"
     cmdline = " ".join(
         quote(arg)
         for arg in Path(f"/proc/{getpid()}/cmdline")
@@ -299,7 +295,7 @@ def generate_tree(args: argparse.Namespace) -> None:
 
 @dataclass
 class TagAttrs:
-    """the metadata to preserve between generating tags for different versions"""
+    """A mutable metadata to preserve between generating tags for different versions"""
 
     # Only one latest can exist
     latest: ClickHouseVersion
@@ -310,6 +306,7 @@ class TagAttrs:
 
 
 def ldf_header(git: Git, directory: Path) -> List[str]:
+    "Generates the header for LDF"
     script_path = Path(__file__).relative_to(git.root)
     script_sha = git_runner(f"git log -1 --format=format:%H -- {script_path}")
     repo = f"https://github.com/{GITHUB_REPOSITORY}"
@@ -333,19 +330,22 @@ def ldf_header(git: Git, directory: Path) -> List[str]:
 def ldf_tags(version: ClickHouseVersion, distro: str, tag_attrs: TagAttrs) -> str:
     """returns the string 'Tags: comma, separated, tags'"""
     tags = []
+    # without_distro shows that it's the default tags set, without `-jammy` suffix
     without_distro = distro in UBUNTU_NAMES.values()
-    with_lts = tag_attrs.lts in (None, version) and version.is_lts
     if version == tag_attrs.latest:
         if without_distro:
             tags.append("latest")
         tags.append(distro)
 
+    # The current version gets the `lts` tag when it's the first LTS version encountered
+    with_lts = tag_attrs.lts in (None, version) and version.is_lts
     if with_lts:
         tag_attrs.lts = version
         if without_distro:
             tags.append("lts")
-        tags.append(f"{distro}-lts")
+        tags.append(f"lts-{distro}")
 
+    # Whether the tag `22`, `23`, `24` etc. should be included in the tags
     with_major = tag_attrs.majors.get(version.major) in (None, version)
     if with_major:
         tag_attrs.majors[version.major] = version
@@ -353,6 +353,7 @@ def ldf_tags(version: ClickHouseVersion, distro: str, tag_attrs: TagAttrs) -> st
         tags.append(f"{version.major}-{distro}")
 
+    # Add all normal tags
     for tag in (
         f"{version.major}.{version.minor}",
         f"{version.major}.{version.minor}.{version.patch}",
@@ -366,25 +367,33 @@ def ldf_tags(version: ClickHouseVersion, distro: str, tag_attrs: TagAttrs) -> st
 
 
 def generate_ldf(args: argparse.Namespace) -> None:
-    "collect all Dockerfile.*"
+    """Collect all args.dockerfile_glob files from args.directory and generate the
+    Library Definition File, read about it in
+    https://github.com/docker-library/official-images/?tab=readme-ov-file#library-definition-files
+    """
     directory = Path(git_runner.cwd) / args.directory / args.image_type
     versions = sorted([get_version_from_string(d.name) for d in directory.iterdir()])
-    assert versions
+    assert versions, "There are no directories to generate the LDF"
     if args.check_changed:
-        assert not path_is_changed(directory)
+        assert not path_is_changed(
+            directory
+        ), f"There are uncommitted changes in {directory}"
     git = Git(True)
-    # Support a few repositories
+    # Support a few repositories, get the git-root for images directory
     dir_git_root = (
         args.directory / git_runner(f"git -C {args.directory} rev-parse --show-cdup")
     ).absolute()
     lines = ldf_header(git, directory)
     tag_attrs = TagAttrs(versions[-1], {}, None)
+
+    # We iterate from the most recent to the oldest version
     for version in reversed(versions):
         tag_dir = directory / str(version)
         for file in tag_dir.glob(args.dockerfile_glob):
             lines.append("")
             distro = file.suffix[1:]
             if distro == "ubuntu":
+                # replace 'ubuntu' by the release name from UBUNTU_NAMES
                 with open(file, "r", encoding="utf-8") as fd:
                     for l in fd:
                         if l.startswith("FROM ubuntu:"):
@@ -396,6 +405,7 @@ def generate_ldf(args: argparse.Namespace) -> None:
             lines.append(f"Directory: {tag_dir.relative_to(dir_git_root)}")
             lines.append(f"File: {file.name}")
 
+    # For the last '\n' in join
     lines.append("")
     ldf_file = (
         Path(git_runner.cwd) / args.directory / DOCKER_LIBRARY_NAME[args.image_type]
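As a self-contained sketch of the tag fan-out that ldf_tags implements after this patch — the namedtuple below stands in for ClickHouseVersion, and the hard-coded latest/LTS flags are assumptions made purely for the demonstration:

    from collections import namedtuple

    Version = namedtuple("Version", "major minor patch tweak")

    def sketch_tags(version, distro, is_latest, is_lts, without_distro=True):
        # mirrors the ordering in ldf_tags: latest -> lts -> major -> full version tags
        tags = []
        if is_latest:
            if without_distro:
                tags.append("latest")
            tags.append(distro)
        if is_lts:
            if without_distro:
                tags.append("lts")
            tags.append(f"lts-{distro}")
        if without_distro:
            tags.append(f"{version.major}")
        tags.append(f"{version.major}-{distro}")
        for tag in (
            f"{version.major}.{version.minor}",
            f"{version.major}.{version.minor}.{version.patch}",
            f"{version.major}.{version.minor}.{version.patch}.{version.tweak}",
        ):
            if without_distro:
                tags.append(tag)
            tags.append(f"{tag}-{distro}")
        return f"Tags: {', '.join(tags)}"

    print(sketch_tags(Version(24, 3, 2, 1), "jammy", True, True))

The sketch drops the TagAttrs bookkeeping, so it assumes the version passed in is the newest one seen both for its major and for the LTS slot.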
From 39aef112d51c819a0cfc3a01e45f346be4123cbe Mon Sep 17 00:00:00 2001
From: maxvostrikov
Date: Tue, 12 Nov 2024 17:44:54 +0100
Subject: [PATCH 167/171] tests for parseDateTime64InJodaSyntax

Added more corner-case tests for the parseDateTime64InJodaSyntax functions

---
 ..._parse_datetime64_in_joda_syntax.reference | 30 ++++++++++++++
 .../03252_parse_datetime64_in_joda_syntax.sql | 39 ++++++++++++++++++-
 2 files changed, 67 insertions(+), 2 deletions(-)

diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference
index 063b76b152c..e4be64155d1 100644
--- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference
+++ 
b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.reference @@ -1,14 +1,44 @@ +parseDateTime64InJodaSyntax +2077-10-09 10:30:10.123 +1970-01-01 08:00:00 +1970-01-01 08:00:01 +2024-01-02 00:00:00 +1970-01-01 10:30:50 +2025-01-01 15:30:10.123456 +2024-01-01 00:00:01.123456 2024-10-09 10:30:10.123 2024-10-09 10:30:10.123456 2024-10-10 02:30:10.123456 +2024-11-05 17:02:03.123456 2024-10-10 01:30:10.123456 +2024-10-09 08:00:10.123456 +2024-09-10 10:30:10.123 +2024-09-10 10:30:10.000999999 +2024-10-10 03:15:10.123456 +2024-03-01 03:23:34 +2024-02-29 15:22:33 +2023-03-01 15:22:33 +2024-03-04 16:22:33 +2023-03-04 16:22:33 +parseDateTime64InJodaSyntaxOrZero 2024-10-09 10:30:10.123 2024-10-09 10:30:10.123456 1970-01-01 08:00:00.000000000 2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 +2024-09-10 10:30:10.123 +1970-01-01 08:00:00.000 +1970-01-01 08:00:00 +1970-01-01 08:00:00 +1970-01-01 08:00:00.000 +parseDateTime64InJodaSyntaxOrNull 2024-10-09 10:30:10.123 2024-10-09 10:30:10.123456 \N 2024-10-10 02:30:10.123456 2024-10-10 01:30:10.123456 +2024-09-10 10:30:10.123 +\N +\N +\N +1970-01-01 00:00:00 diff --git a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql index 9ea854bc324..7e024288f1c 100644 --- a/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql +++ b/tests/queries/0_stateless/03252_parse_datetime64_in_joda_syntax.sql @@ -1,19 +1,54 @@ set session_timezone = 'Asia/Shanghai'; +select 'parseDateTime64InJodaSyntax'; +select parseDateTime64InJodaSyntax('', ''); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +select parseDateTime64InJodaSyntax('2077-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); +select parseDateTime64InJodaSyntax('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64InJodaSyntax('+0000', 'Z'); +select parseDateTime64InJodaSyntax('08:01', 'HH:ss'); +select parseDateTime64InJodaSyntax('2024-01-02', 'yyyy-MM-dd'); +select parseDateTime64InJodaSyntax('10:30:50', 'HH:mm:ss'); +select parseDateTime64InJodaSyntax('2024-12-31 23:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntax('2024-01-01 00:00:01.123456+0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -- { serverError CANNOT_PARSE_DATETIME } select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntax('2024-11-05-0800 01:02:03.123456', 'yyyy-MM-ddZ HH:mm:ss.SSSSSS'); select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); - +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456Australia/Adelaide', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS'); +select parseDateTime64InJodaSyntax('999999 10-09-202410:30:10', 'SSSSSSSSS dd-MM-yyyyHH:mm:ss'); +select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0845', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); +select parseDateTime64InJodaSyntax('2023-02-29 11:22:33Not/Timezone', 'yyyy-MM-dd HH:mm:ssz'); -- { serverError BAD_ARGUMENTS } +--leap years and non-leap years +select 
parseDateTime64InJodaSyntax('2024-02-29 11:23:34America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz');
+select parseDateTime64InJodaSyntax('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-02-28 23:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz');
+select parseDateTime64InJodaSyntax('2023-02-28 23:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz');
+select parseDateTime64InJodaSyntax('2024-03-01 00:22:33-8000', 'yyyy-MM-dd HH:mm:ssZ');
+select parseDateTime64InJodaSyntax('2023-03-01 00:22:33-8000', 'yyyy-MM-dd HH:mm:ssZ');
+select 'parseDateTime64InJodaSyntaxOrZero';
 select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
 select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS');
 select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS');
 select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ');
 select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz');
-
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS');
+select parseDateTime64InJodaSyntaxOrZero('wrong value', 'yyyy-dd-MM HH:mm:ss.SSS');
+select parseDateTime64InJodaSyntaxOrZero('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz');
+select parseDateTime64InJodaSyntaxOrZero('', '');
+select parseDateTime64InJodaSyntaxOrZero('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
+select 'parseDateTime64InJodaSyntaxOrNull';
 select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
 select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS');
 select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS');
 select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ');
 select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz');
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS');
+select parseDateTime64InJodaSyntaxOrNull('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz');
+select parseDateTime64InJodaSyntaxOrNull('', '');
+select parseDateTime64InJodaSyntaxOrNull('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
+
+set session_timezone = 'UTC';
+select parseDateTime64InJodaSyntax('', '');
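Two of the new cases, pulled out as a quick illustration of how the parse results line up with the reference file above (the expected outputs assume the Asia/Shanghai session timezone set at the top of the test):

    set session_timezone = 'Asia/Shanghai';
    select parseDateTime64InJodaSyntax('2024-12-31 23:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ');
    -- 2025-01-01 15:30:10.123456 (the -0800 offset is converted into the session timezone)
    select parseDateTime64InJodaSyntaxOrZero('wrong value', 'yyyy-dd-MM HH:mm:ss.SSS');
    -- 1970-01-01 08:00:00.000 (the OrZero variant returns the epoch instead of throwing)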
From 31b4c6c3e96cabffabd2ebf87121e5836f5bcdd3 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Tue, 12 Nov 2024 18:22:02 +0000
Subject: [PATCH 168/171] Fix typo and cleanup.

---
 src/Processors/QueryPlan/ReadFromMergeTree.cpp                 | 2 +-
 src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp             | 2 +-
 src/Storages/MergeTree/MergeTreeIOSettings.h                   | 2 +-
 src/Storages/MergeTree/MergeTreeSelectProcessor.cpp            | 2 +-
 src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp | 1 -
 5 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
index d144187821a..6a9ca93fd85 100644
--- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp
+++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp
@@ -207,7 +207,7 @@ static MergeTreeReaderSettings getMergeTreeReaderSettings(
         .use_asynchronous_read_from_pool = settings[Setting::allow_asynchronous_read_from_io_pool_for_merge_tree]
             && (settings[Setting::max_streams_to_max_threads_ratio] > 1 || settings[Setting::max_streams_for_merge_tree_reading] > 1),
         .enable_multiple_prewhere_read_steps = settings[Setting::enable_multiple_prewhere_read_steps],
-        .force_shirt_circuit_execution = settings[Setting::query_plan_merge_filters]
+        .force_short_circuit_execution = settings[Setting::query_plan_merge_filters]
     };
 }
 
diff --git a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
index 03a0aed80bf..4a7e02a7a51 100644
--- a/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
+++ b/src/Storages/MergeTree/MergeTreeBlockReadUtils.cpp
@@ -330,7 +330,7 @@ MergeTreeReadTaskColumns getReadTaskColumns(
         auto prewhere_actions = MergeTreeSelectProcessor::getPrewhereActions(
             prewhere_info,
             actions_settings,
-            reader_settings.enable_multiple_prewhere_read_steps, reader_settings.force_shirt_circuit_execution);
+            reader_settings.enable_multiple_prewhere_read_steps, reader_settings.force_short_circuit_execution);
 
         for (const auto & step : prewhere_actions.steps)
             add_step(*step);
diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h
index ecd4ad34961..7506c726bc4 100644
--- a/src/Storages/MergeTree/MergeTreeIOSettings.h
+++ b/src/Storages/MergeTree/MergeTreeIOSettings.h
@@ -46,7 +46,7 @@ struct MergeTreeReaderSettings
     /// If PREWHERE has multiple conditions combined with AND, execute them in separate read/filtering steps.
     bool enable_multiple_prewhere_read_steps = false;
     /// In case of multiple prewhere steps, execute filtering earlier to support short-circuit properly.
-    bool force_shirt_circuit_execution = false;
+    bool force_short_circuit_execution = false;
     /// If true, try to lower size of read buffer according to granule size and compressed block size.
     bool adjust_read_buffer_size = true;
     /// If true, it's allowed to read the whole part without reading marks. 
diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
index 8beff55e698..019808abe76 100644
--- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
+++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp
@@ -91,7 +91,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     , algorithm(std::move(algorithm_))
     , prewhere_info(prewhere_info_)
     , actions_settings(actions_settings_)
-    , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps, reader_settings_.force_shirt_circuit_execution))
+    , prewhere_actions(getPrewhereActions(prewhere_info, actions_settings, reader_settings_.enable_multiple_prewhere_read_steps, reader_settings_.force_short_circuit_execution))
     , reader_settings(reader_settings_)
     , result_header(transformHeader(pool->getHeader(), prewhere_info))
 {
diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp
index 60b6eca1720..66bc0722800 100644
--- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp
+++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp
@@ -315,7 +315,6 @@ bool tryBuildPrewhereSteps(
     /// 1. AND the last condition with constant True. This is needed to make sure that in the last step filter has UInt8 type
     ///    but contains values other than 0 and 1 (e.g. if it is (number%5) it contains 2,3,4)
     /// 2. CAST the result to the exact type of the PREWHERE column from the original DAG
-    //const auto & last_step_result_node_info = node_remap[steps.back().column_name];
     auto & last_step_dag = steps.back().actions;
     auto & last_step_result_node = steps.back().result_node;
     /// Build AND(last_step_result_node, true)

From de4110097f43076d4f78253d2419353079632180 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Tue, 12 Nov 2024 19:49:37 +0000
Subject: [PATCH 169/171] Fix more typos.
--- src/Storages/MergeTree/MergeTreeSelectProcessor.cpp | 6 +++--- src/Storages/MergeTree/MergeTreeSelectProcessor.h | 2 +- .../MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 019808abe76..242f9c6504f 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -124,9 +124,9 @@ String MergeTreeSelectProcessor::getName() const return fmt::format("MergeTreeSelect(pool: {}, algorithm: {})", pool->getName(), algorithm->getName()); } -bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere, bool force_shirt_circuit_execution); +bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere, bool force_short_circuit_execution); -PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps, bool force_shirt_circuit_execution) +PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps, bool force_short_circuit_execution) { PrewhereExprInfo prewhere_actions; if (prewhere_info) @@ -147,7 +147,7 @@ PrewhereExprInfo MergeTreeSelectProcessor::getPrewhereActions(PrewhereInfoPtr pr } if (!enable_multiple_prewhere_read_steps || - !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions, force_shirt_circuit_execution)) + !tryBuildPrewhereSteps(prewhere_info, actions_settings, prewhere_actions, force_short_circuit_execution)) { PrewhereExprStep prewhere_step { diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.h b/src/Storages/MergeTree/MergeTreeSelectProcessor.h index afd88116e15..32a761cefb7 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.h +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.h @@ -74,7 +74,7 @@ public: PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, bool enable_multiple_prewhere_read_steps, - bool force_shirt_circuit_execution); + bool force_short_circuit_execution); void addPartLevelToChunk(bool add_part_level_) { add_part_level = add_part_level_; } diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 66bc0722800..1cc4006a285 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -209,7 +209,7 @@ bool tryBuildPrewhereSteps( PrewhereInfoPtr prewhere_info, const ExpressionActionsSettings & actions_settings, PrewhereExprInfo & prewhere, - bool force_shirt_circuit_execution) + bool force_short_circuit_execution) { if (!prewhere_info) return true; @@ -346,7 +346,7 @@ bool tryBuildPrewhereSteps( /// Don't remove if it's in the list of original outputs .remove_filter_column = step.original_node && !all_outputs.contains(step.original_node) && node_to_step[step.original_node] <= step_index, - .need_filter = force_shirt_circuit_execution, + .need_filter = force_short_circuit_execution, .perform_alter_conversions = true, }; From 61202499b599345cf38c687a36e60978d0f141a4 Mon Sep 17 00:00:00 2001 
From: Alexey Milovidov Date: Wed, 13 Nov 2024 02:10:40 +0100 Subject: [PATCH 170/171] Enable build profiling in pull requests --- tests/ci/ci.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index d6eeb6a3613..eb0fe7c85c3 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -798,10 +798,6 @@ def _upload_build_profile_data( logging.info("Unknown CI logs host, skip uploading build profile data") return - if not pr_info.number == 0: - logging.info("Skipping uploading build profile data for PRs") - return - instance_type = get_instance_type() instance_id = get_instance_id() auth = { From dbc6949ef78f4c7fdca83d3b1f5314b15f71c923 Mon Sep 17 00:00:00 2001 From: Tanya Bragin Date: Wed, 13 Nov 2024 08:09:10 -0800 Subject: [PATCH 171/171] Update README.md - Update meetups Move Barcelona to completed --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index abaf27abf11..3270cd19671 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,6 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else Upcoming meetups -* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12 * [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26 @@ -53,6 +52,7 @@ Upcoming meetups Recently completed meetups +* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12 * [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22 * [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3 * [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1