From 83b79fce832ef4bd3fa230ae8a66d23935724985 Mon Sep 17 00:00:00 2001 From: qhsong Date: Wed, 8 May 2024 16:18:13 +0800 Subject: [PATCH 01/74] Add repeatable uniq ID for processor and step --- src/Common/ThreadStatus.cpp | 10 +++++++ src/Common/ThreadStatus.h | 9 +++++++ src/Interpreters/Context.h | 2 ++ src/Interpreters/ProcessorsProfileLog.cpp | 4 +++ src/Interpreters/ProcessorsProfileLog.h | 4 ++- src/Interpreters/executeQuery.cpp | 2 ++ .../Executors/ExecutionThreadContext.cpp | 2 +- src/Processors/IProcessor.h | 26 ++++++++++++++++--- src/Processors/QueryPlan/IQueryPlanStep.h | 12 ++++++++- src/Processors/QueryPlan/QueryPlan.cpp | 1 + src/QueryPipeline/QueryPipelineBuilder.cpp | 4 +-- src/QueryPipeline/printPipeline.cpp | 4 +-- src/QueryPipeline/printPipeline.h | 2 +- .../01786_explain_merge_tree.reference | 2 ++ 14 files changed, 73 insertions(+), 11 deletions(-) diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index ad96018a17e..74b5475da77 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -221,6 +221,16 @@ bool ThreadStatus::isQueryCanceled() const return false; } +size_t ThreadStatus::incrStepIndex() +{ + return ++(*local_data.step_count); +} + +size_t ThreadStatus::incrProcessorIndex() +{ + return ++(*local_data.processor_count); +} + ThreadStatus::~ThreadStatus() { flushUntrackedMemory(); diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 0c02ab8fdb0..97b45c01e54 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -10,6 +10,7 @@ #include +#include #include #include #include @@ -90,6 +91,11 @@ public: String query_for_logs; UInt64 normalized_query_hash = 0; + //QueryPlan can not build parallel, but processor may build parallel in expand() function. 
+ //so we use atomic_size_t for processor_count + std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr processor_count = std::make_shared(0); + QueryIsCanceledPredicate query_is_canceled_predicate = {}; }; @@ -309,6 +315,9 @@ public: void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period); + size_t incrStepIndex(); + size_t incrProcessorIndex(); + private: void applyGlobalSettings(); void applyQuerySettings(); diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d1ff5b4c2b2..692d71a3384 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1336,7 +1336,9 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling + public: + ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Interpreters/ProcessorsProfileLog.cpp b/src/Interpreters/ProcessorsProfileLog.cpp index 7dec2a3163a..8e1cf278c63 100644 --- a/src/Interpreters/ProcessorsProfileLog.cpp +++ b/src/Interpreters/ProcessorsProfileLog.cpp @@ -42,6 +42,8 @@ ColumnsDescription ProcessorProfileLogElement::getColumnsDescription() {"input_bytes", std::make_shared(), "The number of bytes consumed by processor."}, {"output_rows", std::make_shared(), "The number of rows generated by processor."}, {"output_bytes", std::make_shared(), "The number of bytes generated by processor."}, + {"processor_uniq_id", std::make_shared(), "The uniq processor id in pipeline."}, + {"step_uniq_id", std::make_shared(), "The uniq step id in plan."}, }; } @@ -75,6 +77,8 @@ void ProcessorProfileLogElement::appendToBlock(MutableColumns & columns) const columns[i++]->insert(input_bytes); columns[i++]->insert(output_rows); columns[i++]->insert(output_bytes); + columns[i++]->insert(processor_uniq_id); + columns[i++]->insert(step_uniq_id); } diff --git a/src/Interpreters/ProcessorsProfileLog.h b/src/Interpreters/ProcessorsProfileLog.h index 8319d373f39..1b2abaa8ede 100644 --- a/src/Interpreters/ProcessorsProfileLog.h +++ b/src/Interpreters/ProcessorsProfileLog.h @@ -17,12 +17,14 @@ struct ProcessorProfileLogElement UInt64 id{}; std::vector parent_ids; - UInt64 plan_step{}; + UInt64 plan_step; UInt64 plan_group{}; String initial_query_id; String query_id; String processor_name; + String processor_uniq_id; + String step_uniq_id; /// Milliseconds spend in IProcessor::work() UInt32 elapsed_us{}; diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 33a4cf2a74c..59573e912e4 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -460,6 +460,8 @@ void logQueryFinish( processor_elem.plan_step = reinterpret_cast(processor->getQueryPlanStep()); processor_elem.plan_group = processor->getQueryPlanStepGroup(); + processor_elem.processor_uniq_id = processor->getUniqID(); + processor_elem.step_uniq_id = processor->getStepUniqID(); processor_elem.processor_name = processor->getName(); diff --git a/src/Processors/Executors/ExecutionThreadContext.cpp b/src/Processors/Executors/ExecutionThreadContext.cpp index 05669725f9a..06b4b53c817 100644 --- a/src/Processors/Executors/ExecutionThreadContext.cpp +++ b/src/Processors/Executors/ExecutionThreadContext.cpp @@ -79,7 +79,7 @@ bool ExecutionThreadContext::executeTask() if (trace_processors) { - span = std::make_unique(node->processor->getName()); + span = std::make_unique(node->processor->getUniqID()); span->addAttribute("thread_number", thread_number); } std::optional 
execution_time_watch; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 56b4509fe00..b99ebeb5fa5 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -3,7 +3,9 @@ #include #include #include - +#include +#include +#include class EventCounter; @@ -121,7 +123,10 @@ protected: OutputPorts outputs; public: - IProcessor() = default; + IProcessor() + { + setProcessorIndex(); + } IProcessor(InputPorts inputs_, OutputPorts outputs_) : inputs(std::move(inputs_)), outputs(std::move(outputs_)) @@ -130,9 +135,16 @@ public: port.processor = this; for (auto & port : outputs) port.processor = this; + setProcessorIndex(); + } + + void setProcessorIndex() + { + processor_index = CurrentThread::get().incrProcessorIndex(); } virtual String getName() const = 0; + String getUniqID() const { return fmt::format("{}_{}", getName(), processor_index); } enum class Status { @@ -300,11 +312,16 @@ public: /// Step of QueryPlan from which processor was created. void setQueryPlanStep(IQueryPlanStep * step, size_t group = 0) { - query_plan_step = step; + if (step != nullptr) + { + query_plan_step = step; + step_uniq_id = step->getUniqID(); + } query_plan_step_group = group; } IQueryPlanStep * getQueryPlanStep() const { return query_plan_step; } + const String &getStepUniqID() const { return step_uniq_id; } size_t getQueryPlanStepGroup() const { return query_plan_step_group; } uint64_t getElapsedUs() const { return elapsed_us; } @@ -392,7 +409,10 @@ private: size_t stream_number = NO_STREAM; IQueryPlanStep * query_plan_step = nullptr; + String step_uniq_id; size_t query_plan_step_group = 0; + + size_t processor_index = 0; }; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index ac5ea259d2e..ec5ac9ad4dc 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -2,6 +2,9 @@ #include #include #include +#include +#include +#include namespace DB { @@ -71,6 +74,10 @@ using QueryPlanRawPtrs = std::list; class IQueryPlanStep { public: + IQueryPlanStep() + { + step_index = CurrentThread::get().incrStepIndex(); + } virtual ~IQueryPlanStep() = default; virtual String getName() const = 0; @@ -138,7 +145,7 @@ public: } virtual bool canUpdateInputStream() const { return false; } - + String getUniqID() const { return fmt::format("{}_{}", getName(), step_index); } protected: virtual void updateOutputStream() { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented"); } @@ -153,6 +160,9 @@ protected: Processors processors; static void describePipeline(const Processors & processors, FormatSettings & settings); + +private: + size_t step_index = 0; }; using QueryPlanStepPtr = std::unique_ptr; diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index 0fae7e8df4d..f651870453b 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -206,6 +206,7 @@ QueryPipelineBuilderPtr QueryPlan::buildQueryPipeline( static void explainStep(const IQueryPlanStep & step, JSONBuilder::JSONMap & map, const QueryPlan::ExplainPlanOptions & options) { map.add("Node Type", step.getName()); + map.add("Node Id", step.getUniqID()); if (options.description) { diff --git a/src/QueryPipeline/QueryPipelineBuilder.cpp b/src/QueryPipeline/QueryPipelineBuilder.cpp index 67a8fe5dcab..4b6f15905ce 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.cpp +++ b/src/QueryPipeline/QueryPipelineBuilder.cpp @@ -400,10 +400,10 @@ std::unique_ptr 
QueryPipelineBuilder::joinPipelinesRightLe left->pipe.collected_processors = collected_processors; - /// Collect the NEW processors for the right pipeline. - QueryPipelineProcessorsCollector collector(*right); /// Remember the last step of the right pipeline. IQueryPlanStep * step = right->pipe.processors->back()->getQueryPlanStep(); + /// Collect the NEW processors for the right pipeline. + QueryPipelineProcessorsCollector collector(*right, step); /// In case joined subquery has totals, and we don't, add default chunk to totals. bool default_totals = false; diff --git a/src/QueryPipeline/printPipeline.cpp b/src/QueryPipeline/printPipeline.cpp index 40c88502ed0..1726d776921 100644 --- a/src/QueryPipeline/printPipeline.cpp +++ b/src/QueryPipeline/printPipeline.cpp @@ -113,7 +113,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool if (item.first != nullptr) { out << " subgraph cluster_" << next_step << " {\n"; - out << " label =\"" << item.first->getName() << "\";\n"; + out << " label =\"" << item.first->getUniqID() << "\";\n"; out << " style=filled;\n"; out << " color=lightgrey;\n"; out << " node [style=filled,color=white];\n"; @@ -125,7 +125,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool for (const auto & node : item.second) { const auto & processor = node->agents.front(); - out << " n" << node->id << " [label=\"" << processor->getName(); + out << " n" << node->id << " [label=\"" << processor->getUniqID(); if (node->agents.size() > 1) out << " × " << node->agents.size(); diff --git a/src/QueryPipeline/printPipeline.h b/src/QueryPipeline/printPipeline.h index 2bdbd8f7a07..e6799251851 100644 --- a/src/QueryPipeline/printPipeline.h +++ b/src/QueryPipeline/printPipeline.h @@ -30,7 +30,7 @@ void printPipeline(const Processors & processors, const Statuses & statuses, Wri for (const auto & processor : processors) { const auto & description = processor->getDescription(); - out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getName() << (description.empty() ? "" : ":") << description; + out << " n" << get_proc_id(*processor) << "[label=\"" << processor->getUniqID() << (description.empty() ? 
"" : ":") << description; if (statuses_iter != statuses.end()) { diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 3a015d32539..36ebbe1a1da 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,6 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { @@ -126,6 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { From 59bd7447fcb1db44bc77d93339b36dae684d5daf Mon Sep 17 00:00:00 2001 From: qhsong Date: Tue, 30 Jul 2024 10:23:51 +0800 Subject: [PATCH 02/74] Fix testcase --- src/Common/ThreadStatus.h | 2 +- src/Interpreters/Context.h | 2 -- src/Processors/IProcessor.h | 5 ++++- src/Processors/QueryPlan/IQueryPlanStep.h | 5 ++++- .../0_stateless/01786_explain_merge_tree.reference | 4 ++-- .../0_stateless/01823_explain_json.reference | 13 +++++++++++-- .../03213_distributed_analyzer.reference | 2 +- 7 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index 97b45c01e54..fd384ad1603 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -93,7 +93,7 @@ public: //QueryPlan can not build parallel, but processor may build parallel in expand() function. //so we use atomic_size_t for processor_count - std::shared_ptr step_count = std::make_shared(0); + std::shared_ptr step_count = std::make_shared(0); std::shared_ptr processor_count = std::make_shared(0); QueryIsCanceledPredicate query_is_canceled_predicate = {}; diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index 1f331f0d094..cb553d07513 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -1404,9 +1404,7 @@ private: std::shared_ptr getClustersImpl(std::lock_guard & lock) const; /// Throttling - public: - ThrottlerPtr getReplicatedFetchesThrottler() const; ThrottlerPtr getReplicatedSendsThrottler() const; diff --git a/src/Processors/IProcessor.h b/src/Processors/IProcessor.h index 3933a79ab55..d426d5ef9ba 100644 --- a/src/Processors/IProcessor.h +++ b/src/Processors/IProcessor.h @@ -140,7 +140,10 @@ public: void setProcessorIndex() { - processor_index = CurrentThread::get().incrProcessorIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + processor_index = CurrentThread::get().incrProcessorIndex(); + } } virtual String getName() const = 0; diff --git a/src/Processors/QueryPlan/IQueryPlanStep.h b/src/Processors/QueryPlan/IQueryPlanStep.h index acd8857b9df..500e0812983 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.h +++ b/src/Processors/QueryPlan/IQueryPlanStep.h @@ -76,7 +76,10 @@ class IQueryPlanStep public: IQueryPlanStep() { - step_index = CurrentThread::get().incrStepIndex(); + if (CurrentThread::isInitialized()) [[likely]] + { + step_index = CurrentThread::get().incrStepIndex(); + } } virtual ~IQueryPlanStep() = default; diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 36ebbe1a1da..75736669905 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,7 +29,7 @@ Granules: 2/3 ----------------- "Node Type": 
"ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { @@ -127,7 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_0", + "Node Id": "ReadFromMergeTree_1", "Description": "default.test_index", "Indexes": [ { diff --git a/tests/queries/0_stateless/01823_explain_json.reference b/tests/queries/0_stateless/01823_explain_json.reference index 23fb34c2192..1aa5aa134e9 100644 --- a/tests/queries/0_stateless/01823_explain_json.reference +++ b/tests/queries/0_stateless/01823_explain_json.reference @@ -2,20 +2,25 @@ { "Plan": { "Node Type": "Union", + "Node Id": "Union_11", "Plans": [ { "Node Type": "Expression", + "Node Id": "Expression_14", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_1" } ] }, { "Node Type": "Expression", + "Node Id": "Expression_17", "Plans": [ { - "Node Type": "ReadFromStorage" + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_5" } ] } @@ -35,6 +40,7 @@ } -------- "Node Type": "Aggregating", + "Node Id": "Aggregating_4", "Header": [ { "Name": "__table1.number", @@ -73,13 +79,16 @@ ], -------- "Node Type": "ArrayJoin", + "Node Id": "ArrayJoin_5", "Left": false, "Columns": ["__table1.x", "__table1.y"], -------- "Node Type": "Distinct", + "Node Id": "Distinct_5", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -- "Node Type": "Distinct", + "Node Id": "Distinct_4", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -------- "Sort Description": [ diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.reference b/tests/queries/0_stateless/03213_distributed_analyzer.reference index 9d63c0a7a5e..2456192ca9d 100644 --- a/tests/queries/0_stateless/03213_distributed_analyzer.reference +++ b/tests/queries/0_stateless/03213_distributed_analyzer.reference @@ -1 +1 @@ -['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}'] +['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}'] From d10b79020edda1f35f9f3637447cd57d90352bae Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 14 Oct 2024 13:39:03 +0000 Subject: [PATCH 03/74] Refactoring TempDataOnDisk --- src/Common/CurrentMetrics.cpp | 1 + .../gtest_cascade_and_memory_write_buffer.cpp | 2 +- src/Interpreters/Aggregator.cpp | 12 +- src/Interpreters/Aggregator.h | 9 +- src/Interpreters/Context.cpp | 20 +- src/Interpreters/GraceHashJoin.cpp | 60 +- src/Interpreters/GraceHashJoin.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 29 +- src/Interpreters/HashJoin/HashJoin.h | 5 +- src/Interpreters/TableJoin.h | 7 +- src/Interpreters/TemporaryDataOnDisk.cpp | 647 ++++++++---------- src/Interpreters/TemporaryDataOnDisk.h | 233 ++++--- src/Interpreters/tests/gtest_filecache.cpp | 85 ++- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- 
.../Merges/CollapsingSortedTransform.h | 2 +- .../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 - src/Processors/QueryPlan/SortingStep.cpp | 6 +- .../Transforms/AggregatingTransform.cpp | 27 +- .../Transforms/MergeSortingTransform.cpp | 44 +- .../Transforms/MergeSortingTransform.h | 5 +- src/QueryPipeline/QueryPipelineBuilder.h | 6 + src/QueryPipeline/QueryPlanResourceHolder.h | 2 + src/Server/HTTPHandler.cpp | 39 +- src/Storages/MergeTree/MergeTask.cpp | 124 +--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 37 files changed, 646 insertions(+), 789 deletions(-) diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index bd62e7e8aae..8d232e11df3 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -45,6 +45,7 @@ M(TemporaryFilesForSort, "Number of temporary files created for external sorting") \ M(TemporaryFilesForAggregation, "Number of temporary files created for external aggregation") \ M(TemporaryFilesForJoin, "Number of temporary files created for JOIN") \ + M(TemporaryFilesForMerge, "Number of temporary files for vertical merge") \ M(TemporaryFilesUnknown, "Number of temporary files created without known purpose") \ M(Read, "Number of read (read, pread, io_getevents, etc.) syscalls in fly") \ M(RemoteRead, "Number of read with remote reader in fly") \ diff --git a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp index 23b783173c8..6fd7570c4eb 100644 --- a/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp +++ b/src/Disks/tests/gtest_cascade_and_memory_write_buffer.cpp @@ -69,7 +69,7 @@ static void testCascadeBufferRedability( auto rbuf = wbuf_readable.tryGetReadBuffer(); ASSERT_FALSE(!rbuf); - concat.appendBuffer(wrapReadBufferPointer(std::move(rbuf))); + concat.appendBuffer(std::move(rbuf)); } std::string decoded_data; diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 0fe1c74ed17..e6fecc37cfa 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -335,7 +335,7 @@ Aggregator::Aggregator(const Block & header_, const Params & params_) : header(header_) , keys_positions(calculateKeysPositions(header, params_)) , params(params_) - , tmp_data(params.tmp_data_scope ? std::make_unique(params.tmp_data_scope, CurrentMetrics::TemporaryFilesForAggregation) : nullptr) + , tmp_data(params.tmp_data_scope ? params.tmp_data_scope->childScope(CurrentMetrics::TemporaryFilesForAggregation) : nullptr) , min_bytes_for_prefetch(getMinBytesForPrefetch()) { /// Use query-level memory tracker @@ -1519,10 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); - auto & out_stream = tmp_data->createStream(getHeader(false), max_temp_file_size); + auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); - LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getPath()); + LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); /// Flush only two-level data and possibly overflow data. 
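For orientation, the Aggregator change above follows the general pattern of this refactoring: streams created and owned by the scope object are replaced by stream holders owned by the caller. A condensed before/after sketch, illustrative only and built solely from the calls visible in this diff (the block write itself happens in writeToTemporaryFileImpl):

    // before: the stream is created and owned by the TemporaryDataOnDisk scope
    auto & out_stream = tmp_data->createStream(getHeader(false), max_temp_file_size);
    out_stream.write(block);

    // after: the Aggregator owns a vector of TemporaryBlockStreamHolder objects
    auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size);
    out_stream->write(block);   // operator-> forwards to the wrapped NativeWriter
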
@@ -1643,7 +1643,7 @@ template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const + TemporaryBlockStreamHolder & out) const { size_t max_temporary_block_size_rows = 0; size_t max_temporary_block_size_bytes = 0; @@ -1660,14 +1660,14 @@ void Aggregator::writeToTemporaryFileImpl( for (UInt32 bucket = 0; bucket < Method::Data::NUM_BUCKETS; ++bucket) { Block block = convertOneBucketToBlock(data_variants, method, data_variants.aggregates_pool, false, bucket); - out.write(block); + out->write(block); update_max_sizes(block); } if (params.overflow_row) { Block block = prepareBlockAndFillWithoutKey(data_variants, false, true); - out.write(block); + out->write(block); update_max_sizes(block); } diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 4de0a640219..bc28d3dccb8 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return tmp_data && !tmp_data->empty(); } + bool hasTemporaryData() const { return !tmp_files.empty(); } - const TemporaryDataOnDisk & getTemporaryData() const { return *tmp_data; } + std::vector & getTemporaryData() { return tmp_files; } /// Get data structure of the result. Block getHeader(bool final) const; @@ -355,7 +355,8 @@ private: LoggerPtr log = getLogger("Aggregator"); /// For external aggregation. - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + mutable std::vector tmp_files; size_t min_bytes_for_prefetch = 0; @@ -456,7 +457,7 @@ private: void writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, Method & method, - TemporaryFileStream & out) const; + TemporaryBlockStreamHolder & out) const; /// Merge NULL key data from hash table `src` into `dst`. template diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 85cde959b66..6ada12e63f9 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -353,6 +353,8 @@ struct ContextSharedPart : boost::noncopyable /// Child scopes for more fine-grained accounting are created per user/query/etc. /// Initialized once during server startup. TemporaryDataOnDiskScopePtr root_temp_data_on_disk TSA_GUARDED_BY(mutex); + /// TODO: remove, use only root_temp_data_on_disk + VolumePtr temporary_volume_legacy; mutable OnceFlag async_loader_initialized; mutable std::unique_ptr async_loader; /// Thread pool for asynchronous initialization of arbitrary DAG of `LoadJob`s (used for tables loading) @@ -783,10 +785,9 @@ struct ContextSharedPart : boost::noncopyable } /// Special volumes might also use disks that require shutdown. - auto & tmp_data = root_temp_data_on_disk; - if (tmp_data && tmp_data->getVolume()) + if (temporary_volume_legacy) { - auto & disks = tmp_data->getVolume()->getDisks(); + auto & disks = temporary_volume_legacy->getDisks(); for (auto & disk : disks) disk->shutdown(); } @@ -1166,8 +1167,8 @@ VolumePtr Context::getGlobalTemporaryVolume() const SharedLockGuard lock(shared->mutex); /// Calling this method we just bypass the `temp_data_on_disk` and write to the file on the volume directly. /// Volume is the same for `root_temp_data_on_disk` (always set) and `temp_data_on_disk` (if it's set). 
- if (shared->root_temp_data_on_disk) - return shared->root_temp_data_on_disk->getVolume(); + if (shared->temporary_volume_legacy) + return shared->temporary_volume_legacy; return nullptr; } @@ -1288,7 +1289,8 @@ void Context::setTemporaryStoragePath(const String & path, size_t max_size) TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_size) @@ -1336,7 +1338,8 @@ void Context::setTemporaryStoragePolicy(const String & policy_name, size_t max_s TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(volume, std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t max_size) @@ -1360,7 +1363,8 @@ void Context::setTemporaryStorageInCache(const String & cache_disk_name, size_t TemporaryDataOnDiskSettings temporary_data_on_disk_settings; temporary_data_on_disk_settings.max_size_on_disk = max_size; - shared->root_temp_data_on_disk = std::make_shared(std::move(volume), file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->root_temp_data_on_disk = std::make_shared(file_cache.get(), std::move(temporary_data_on_disk_settings)); + shared->temporary_volume_legacy = volume; } void Context::setFlagsPath(const String & path) diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index 978782c851f..a2010b7d94b 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -41,15 +41,15 @@ namespace class AccumulatedBlockReader { public: - AccumulatedBlockReader(TemporaryFileStream & reader_, + AccumulatedBlockReader(TemporaryBlockStreamReaderHolder reader_, std::mutex & mutex_, size_t result_block_size_ = 0) - : reader(reader_) + : reader(std::move(reader_)) , mutex(mutex_) , result_block_size(result_block_size_) { - if (!reader.isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Reading not finished file"); + if (!reader) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Reader is nullptr"); } Block read() @@ -63,7 +63,7 @@ namespace size_t rows_read = 0; do { - Block block = reader.read(); + Block block = reader->read(); rows_read += block.rows(); if (!block) { @@ -81,7 +81,7 @@ namespace } private: - TemporaryFileStream & reader; + TemporaryBlockStreamReaderHolder reader; std::mutex & mutex; const size_t result_block_size; @@ -124,12 +124,12 @@ class GraceHashJoin::FileBucket : boost::noncopyable public: using BucketLock = std::unique_lock; - explicit FileBucket(size_t bucket_index_, TemporaryFileStream & left_file_, TemporaryFileStream & right_file_, LoggerPtr log_) - : idx{bucket_index_} - , left_file{left_file_} - , right_file{right_file_} - , state{State::WRITING_BLOCKS} - , log{log_} + explicit FileBucket(size_t bucket_index_, TemporaryBlockStreamHolder left_file_, TemporaryBlockStreamHolder right_file_, LoggerPtr log_) + : idx(bucket_index_) + , 
left_file(std::move(left_file_)) + , right_file(std::move(right_file_)) + , state(State::WRITING_BLOCKS) + , log(log_) { } @@ -157,12 +157,6 @@ public: return addBlockImpl(block, right_file, lock); } - bool finished() const - { - std::unique_lock left_lock(left_file_mutex); - return left_file.isEof(); - } - bool empty() const { return is_empty.load(); } AccumulatedBlockReader startJoining() @@ -172,24 +166,21 @@ public: std::unique_lock left_lock(left_file_mutex); std::unique_lock right_lock(right_file_mutex); - left_file.finishWriting(); - right_file.finishWriting(); - state = State::JOINING_BLOCKS; } - return AccumulatedBlockReader(right_file, right_file_mutex); + return AccumulatedBlockReader(right_file.getReadStream(), right_file_mutex); } AccumulatedBlockReader getLeftTableReader() { ensureState(State::JOINING_BLOCKS); - return AccumulatedBlockReader(left_file, left_file_mutex); + return AccumulatedBlockReader(left_file.getReadStream(), left_file_mutex); } const size_t idx; private: - bool addBlockImpl(const Block & block, TemporaryFileStream & writer, std::unique_lock & lock) + bool addBlockImpl(const Block & block, TemporaryBlockStreamHolder & writer, std::unique_lock & lock) { ensureState(State::WRITING_BLOCKS); @@ -199,7 +190,7 @@ private: if (block.rows()) is_empty = false; - writer.write(block); + writer->write(block); return true; } @@ -217,8 +208,8 @@ private: throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid state transition, expected {}, got {}", expected, state.load()); } - TemporaryFileStream & left_file; - TemporaryFileStream & right_file; + TemporaryBlockStreamHolder left_file; + TemporaryBlockStreamHolder right_file; mutable std::mutex left_file_mutex; mutable std::mutex right_file_mutex; @@ -274,7 +265,7 @@ GraceHashJoin::GraceHashJoin( , max_num_buckets{context->getSettingsRef()[Setting::grace_hash_join_max_buckets]} , left_key_names(table_join->getOnlyClause().key_names_left) , right_key_names(table_join->getOnlyClause().key_names_right) - , tmp_data(std::make_unique(tmp_data_, CurrentMetrics::TemporaryFilesForJoin)) + , tmp_data(tmp_data_->childScope(CurrentMetrics::TemporaryFilesForJoin)) , hash_join(makeInMemoryJoin("grace0")) , hash_join_sample_block(hash_join->savedBlockSample()) { @@ -398,10 +389,10 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - auto & left_file = tmp_data->createStream(left_sample_block); - auto & right_file = tmp_data->createStream(prepareRightBlock(right_sample_block)); + TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); - BucketPtr new_bucket = std::make_shared(current_size + i, left_file, right_file, log); + BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); } catch (...) @@ -632,12 +623,9 @@ IBlocksStreamPtr GraceHashJoin::getDelayedBlocks() for (bucket_idx = bucket_idx + 1; bucket_idx < buckets.size(); ++bucket_idx) { current_bucket = buckets[bucket_idx].get(); - if (current_bucket->finished() || current_bucket->empty()) + if (current_bucket->empty()) { - LOG_TRACE(log, "Skipping {} {} bucket {}", - current_bucket->finished() ? "finished" : "", - current_bucket->empty() ? 
"empty" : "", - bucket_idx); + LOG_TRACE(log, "Skipping empty bucket {}", bucket_idx); continue; } diff --git a/src/Interpreters/GraceHashJoin.h b/src/Interpreters/GraceHashJoin.h index d31d6886af7..938c9b1facf 100644 --- a/src/Interpreters/GraceHashJoin.h +++ b/src/Interpreters/GraceHashJoin.h @@ -132,7 +132,7 @@ private: Names left_key_names; Names right_key_names; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; Buckets buckets; mutable SharedMutex rehash_mutex; diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index 3e7f3deea8b..af23b520abb 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -35,11 +35,6 @@ #include #include -namespace CurrentMetrics -{ - extern const Metric TemporaryFilesForJoin; -} - namespace DB { @@ -64,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - std::unique_ptr reader; + TemporaryBlockStreamReaderHolder reader; }; @@ -106,10 +101,7 @@ HashJoin::HashJoin(std::shared_ptr table_join_, const Block & right_s , instance_id(instance_id_) , asof_inequality(table_join->getAsofInequality()) , data(std::make_shared()) - , tmp_data( - table_join_->getTempDataOnDisk() - ? std::make_unique(table_join_->getTempDataOnDisk(), CurrentMetrics::TemporaryFilesForJoin) - : nullptr) + , tmp_data(table_join_->getTempDataOnDisk()) , right_sample_block(right_sample_block_) , max_joined_block_rows(table_join->maxJoinedBlockRows()) , instance_log_id(!instance_id_.empty() ? "(" + instance_id_ + ") " : "") @@ -520,10 +512,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) && (tmp_stream || (max_bytes_in_join && getTotalByteCount() + block_to_save.allocatedBytes() >= max_bytes_in_join) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { - if (tmp_stream == nullptr) - { - tmp_stream = &tmp_data->createStream(right_sample_block); - } + if (!tmp_stream) + tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream->write(block_to_save); return true; } @@ -730,7 +721,7 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - std::unique_ptr reader = nullptr; + TemporaryBlockStreamReaderHolder reader; if (not_processed) { auto & continuation = static_cast(*not_processed); @@ -804,11 +795,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { - if (reader == nullptr) - { - tmp_stream->finishWritingAsyncSafe(); - reader = tmp_stream->getReadStream(); - } + if (!reader) + reader = tmp_stream.getReadStream(); + while (auto block_right = reader->read()) { ++block_number; diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 4c1ebbcdc66..0f50e110db9 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -423,8 +423,9 @@ private: std::vector key_sizes; /// Needed to do external cross join - TemporaryDataOnDiskPtr tmp_data; - TemporaryFileStream* tmp_stream{nullptr}; + TemporaryDataOnDiskScopePtr tmp_data; + TemporaryBlockStreamHolder tmp_stream; + mutable std::once_flag finish_writing; /// Block with columns from the right-side table. 
Block right_sample_block; diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index e1bae55a4ed..4ecbc9eb960 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -20,6 +20,11 @@ #include #include +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForJoin; +} + namespace DB { @@ -265,7 +270,7 @@ public: VolumePtr getGlobalTemporaryVolume() { return tmp_volume; } - TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } + TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data ? tmp_data->childScope(CurrentMetrics::TemporaryFilesForJoin) : nullptr; } ActionsDAG createJoinedBlockActions(ContextPtr context) const; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 81796678f24..c3b24fb783b 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -27,11 +27,266 @@ namespace DB namespace ErrorCodes { - extern const int TOO_MANY_ROWS_OR_BYTES; + extern const int INVALID_STATE; extern const int LOGICAL_ERROR; extern const int NOT_ENOUGH_SPACE; + extern const int TOO_MANY_ROWS_OR_BYTES; } +namespace +{ + +inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) +{ + if (settings.compression_codec.empty()) + return CompressionCodecFactory::instance().get("NONE"); + + return CompressionCodecFactory::instance().get(settings.compression_codec); +} + +} + +TemporaryFileHolder::TemporaryFileHolder() +{ + ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); +} + + +class TemporaryFileInLocalCache : public TemporaryFileHolder +{ +public: + explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) + { + const auto key = FileSegment::Key::random(); + segment_holder = file_cache.set( + key, 0, std::max(10_MiB, max_file_size), + CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); + + chassert(segment_holder->size() == 1); + segment_holder->front().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); + } + + std::unique_ptr write() override + { + return std::make_unique(&segment_holder->front()); + } + + std::unique_ptr read(size_t buffer_size) const override + { + return std::make_unique(segment_holder->front().getPath(), /* buf_size = */ buffer_size); + } + + String describeFilePath() const override + { + return fmt::format("fscache://{}", segment_holder->front().getPath()); + } + +private: + FileSegmentsHolderPtr segment_holder; +}; + +class TemporaryFileOnLocalDisk : public TemporaryFileHolder +{ +public: + explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) + : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) + { + if (max_file_size > 0) + { + auto reservation = volume->reserve(max_file_size); + if (!reservation) + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + disk = reservation->getDisk(); + } + else + { + disk = volume->getDisk(); + } + chassert(disk); + } + + std::unique_ptr write() override + { + return disk->writeFile(path_to_file); + } + + std::unique_ptr read(size_t buffer_size) const override + { + ReadSettings settings; + settings.local_fs_buffer_size = buffer_size; + settings.remote_fs_buffer_size = buffer_size; + settings.prefetch_buffer_size = buffer_size; + + return disk->readFile(path_to_file, settings); + } + + String describeFilePath() const override + { + return fmt::format("disk({})://{}/{}", disk->getName(), 
disk->getPath(), path_to_file); + } + + ~TemporaryFileOnLocalDisk() override + try + { + if (disk->exists(path_to_file)) + disk->removeRecursive(path_to_file); + else + LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + +private: + DiskPtr disk; + String path_to_file; +}; + +TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume) +{ + if (!volume) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Volume is not initialized"); + return [volume](size_t max_size) -> std::unique_ptr + { + return std::make_unique(volume, max_size); + }; +} + +TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache) +{ + if (!file_cache || !file_cache->isInitialized()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "File cache is not initialized"); + return [file_cache](size_t max_size) -> std::unique_ptr + { + return std::make_unique(*file_cache, max_size); + }; +} + +TemporaryDataOnDiskScopePtr TemporaryDataOnDiskScope::childScope(CurrentMetrics::Metric current_metric) +{ + TemporaryDataOnDiskSettings child_settings = settings; + child_settings.current_metric = current_metric; + return std::make_shared(shared_from_this(), child_settings); +} + +TemporaryDataReadBuffer::TemporaryDataReadBuffer(std::unique_ptr in_) + : ReadBuffer(nullptr, 0) + , compressed_buf(std::move(in_)) +{ + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); +} + +bool TemporaryDataReadBuffer::nextImpl() +{ + compressed_buf->position() = position(); + if (!compressed_buf->next()) + { + set(compressed_buf->position(), 0); + return false; + } + BufferBase::set(compressed_buf->buffer().begin(), compressed_buf->buffer().size(), compressed_buf->offset()); + return true; +} + +TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WriteBuffer(nullptr, 0) + , parent(parent_) + , file_holder(parent->file_provider(max_file_size == 0 ? 
parent->getSettings().max_size_on_disk : max_file_size)) + , out_compressed_buf(file_holder->write(), getCodec(parent->getSettings())) +{ + WriteBuffer::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size()); +} + +void TemporaryDataBuffer::nextImpl() +{ + if (!out_compressed_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished"); + + out_compressed_buf->position() = position(); + out_compressed_buf->next(); + BufferBase::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size(), out_compressed_buf->offset()); + updateAllocAndCheck(); +} + +String TemporaryDataBuffer::describeFilePath() const +{ + return file_holder->describeFilePath(); +} + +TemporaryDataBuffer::~TemporaryDataBuffer() +{ + if (out_compressed_buf) + // read() nor finishWriting() was called + cancel(); +} + +void TemporaryDataBuffer::cancelImpl() noexcept +{ + if (out_compressed_buf) + { + /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer + out_compressed_buf->cancel(); + out_compressed_buf.getHolder()->cancel(); + out_compressed_buf.reset(); + } +} + +void TemporaryDataBuffer::finalizeImpl() +{ + if (!out_compressed_buf) + return; + + /// CompressedWriteBuffer doesn't call cancel/finalize for wrapped buffer + out_compressed_buf->finalize(); + out_compressed_buf.getHolder()->finalize(); + + updateAllocAndCheck(); + out_compressed_buf.reset(); +} + +TemporaryDataBuffer::Stat TemporaryDataBuffer::finishWriting() +{ + /// TemporaryDataBuffer::read can be called from multiple threads + std::call_once(write_finished, [this] + { + if (canceled) + throw Exception(ErrorCodes::INVALID_STATE, "Writing to temporary file buffer was not successful"); + next(); + finalize(); + }); + return stat; +} + +std::unique_ptr TemporaryDataBuffer::read() +{ + finishWriting(); + + /// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files + size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE); + return std::make_unique(file_holder->read(buffer_size)); +} + +void TemporaryDataBuffer::updateAllocAndCheck() +{ + if (!out_compressed_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file buffer writing has been finished"); + + size_t new_compressed_size = out_compressed_buf->getCompressedBytes(); + size_t new_uncompressed_size = out_compressed_buf->getUncompressedBytes(); + + if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size)) + { + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}", + file_holder ? 
file_holder->describeFilePath() : "NULL", + new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); + } + + parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); + stat.compressed_size = new_compressed_size; + stat.uncompressed_size = new_uncompressed_size; +} void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta) { @@ -54,391 +309,25 @@ void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssiz stat.uncompressed_size += uncompressed_delta; } -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) +TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size) + : WrapperGuard(std::make_unique(parent_, max_file_size), DBMS_TCP_PROTOCOL_VERSION, header_) + , header(header_) {} -TemporaryDataOnDisk::TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope) - : TemporaryDataOnDiskScope(parent_, parent_->getSettings()) - , current_metric_scope(metric_scope) -{} - -std::unique_ptr TemporaryDataOnDisk::createRawStream(size_t max_file_size) +TemporaryDataBuffer::Stat TemporaryBlockStreamHolder::finishWriting() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - return std::make_unique(std::move(holder)); - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - return std::make_unique(std::move(tmp_file)); - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); + impl->flush(); + return holder->finishWriting(); } -TemporaryFileStream & TemporaryDataOnDisk::createStream(const Block & header, size_t max_file_size) +TemporaryBlockStreamReaderHolder TemporaryBlockStreamHolder::getReadStream() const { - if (file_cache && file_cache->isInitialized()) - { - auto holder = createCacheFile(max_file_size); - - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream = streams.emplace_back(std::make_unique(std::move(holder), header, this)); - return *tmp_stream; - } - if (volume) - { - auto tmp_file = createRegularFile(max_file_size); - std::lock_guard lock(mutex); - TemporaryFileStreamPtr & tmp_stream - = streams.emplace_back(std::make_unique(std::move(tmp_file), header, this)); - return *tmp_stream; - } - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache and no volume"); -} - -FileSegmentsHolderPtr TemporaryDataOnDisk::createCacheFile(size_t max_file_size) -{ - if (!file_cache) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no cache"); - - ProfileEvents::increment(ProfileEvents::ExternalProcessingFilesTotal); - - const auto key = FileSegment::Key::random(); - auto holder = file_cache->set( - key, 0, std::max(10_MiB, max_file_size), - CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); - - chassert(holder->size() == 1); - holder->back().getKeyMetadata()->createBaseDirectory(/* throw_if_failed */true); - - return holder; -} - -TemporaryFileOnDiskHolder TemporaryDataOnDisk::createRegularFile(size_t max_file_size) -{ - if (!volume) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDiskScope has no volume"); - - 
DiskPtr disk; - if (max_file_size > 0) - { - auto reservation = volume->reserve(max_file_size); - if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); - disk = reservation->getDisk(); - } - else - { - disk = volume->getDisk(); - } - /// We do not increment ProfileEvents::ExternalProcessingFilesTotal here because it is incremented in TemporaryFileOnDisk constructor. - return std::make_unique(disk, current_metric_scope); -} - -std::vector TemporaryDataOnDisk::getStreams() const -{ - std::vector res; - std::lock_guard lock(mutex); - res.reserve(streams.size()); - for (const auto & stream : streams) - res.push_back(stream.get()); - return res; -} - -bool TemporaryDataOnDisk::empty() const -{ - std::lock_guard lock(mutex); - return streams.empty(); -} - -static inline CompressionCodecPtr getCodec(const TemporaryDataOnDiskSettings & settings) -{ - if (settings.compression_codec.empty()) - return CompressionCodecFactory::instance().get("NONE"); - - return CompressionCodecFactory::instance().get(settings.compression_codec); -} - -struct TemporaryFileStream::OutputWriter -{ - OutputWriter(std::unique_ptr out_buf_, const Block & header_, const TemporaryDataOnDiskSettings & settings) - : out_buf(std::move(out_buf_)) - , out_compressed_buf(*out_buf, getCodec(settings)) - , out_writer(out_compressed_buf, DBMS_TCP_PROTOCOL_VERSION, header_) - { - } - - size_t write(const Block & block) - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot write to finalized stream"); - size_t written_bytes = out_writer.write(block); - num_rows += block.rows(); - return written_bytes; - } - - void flush() - { - if (finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot flush finalized stream"); - - out_compressed_buf.next(); - out_buf->next(); - out_writer.flush(); - } - - void finalize() - { - if (finalized) - return; - - /// if we called finalize() explicitly, and got an exception, - /// we don't want to get it again in the destructor, so set finalized flag first - finalized = true; - - out_writer.flush(); - out_compressed_buf.finalize(); - out_buf->finalize(); - } - - ~OutputWriter() - { - try - { - finalize(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - } - } - - std::unique_ptr out_buf; - CompressedWriteBuffer out_compressed_buf; - NativeWriter out_writer; - - std::atomic_size_t num_rows = 0; - - bool finalized = false; -}; - -TemporaryFileStream::Reader::Reader(const String & path_, const Block & header_, size_t size_) - : path(path_) - , size(size_ ? std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) - , header(header_) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading {} from {}", header_.dumpStructure(), path); -} - -TemporaryFileStream::Reader::Reader(const String & path_, size_t size_) - : path(path_) - , size(size_ ? 
std::min(size_, DBMS_DEFAULT_BUFFER_SIZE) : DBMS_DEFAULT_BUFFER_SIZE) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Reading from {}", path); -} - -Block TemporaryFileStream::Reader::read() -{ - if (!in_reader) - { - if (fs::exists(path)) - in_file_buf = std::make_unique(path, size); - else - in_file_buf = std::make_unique(); - - in_compressed_buf = std::make_unique(*in_file_buf); - if (header.has_value()) - in_reader = std::make_unique(*in_compressed_buf, header.value(), DBMS_TCP_PROTOCOL_VERSION); - else - in_reader = std::make_unique(*in_compressed_buf, DBMS_TCP_PROTOCOL_VERSION); - } - return in_reader->read(); -} - -TemporaryFileStream::TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , file(std::move(file_)) - , out_writer(std::make_unique(std::make_unique(file->getAbsolutePath()), header, parent->settings)) -{ - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", file->getAbsolutePath()); -} - -TemporaryFileStream::TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_) - : parent(parent_) - , header(header_) - , segment_holder(std::move(segments_)) -{ - if (segment_holder->size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream can be created only from single segment"); - auto out_buf = std::make_unique(&segment_holder->front()); - - LOG_TEST(getLogger("TemporaryFileStream"), "Writing to temporary file {}", out_buf->getFileName()); - out_writer = std::make_unique(std::move(out_buf), header, parent_->settings); -} - -size_t TemporaryFileStream::write(const Block & block) -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - updateAllocAndCheck(); - size_t bytes_written = out_writer->write(block); - return bytes_written; -} - -void TemporaryFileStream::flush() -{ - if (!out_writer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been finished"); - - out_writer->flush(); -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWriting() -{ - if (isWriteFinished()) - return stat; - - if (out_writer) - { - out_writer->finalize(); - /// The amount of written data can be changed after finalization, some buffers can be flushed - /// Need to update the stat - updateAllocAndCheck(); - out_writer.reset(); - - /// reader will be created at the first read call, not to consume memory before it is needed - } - return stat; -} - -TemporaryFileStream::Stat TemporaryFileStream::finishWritingAsyncSafe() -{ - std::call_once(finish_writing, [this]{ finishWriting(); }); - return stat; -} - -bool TemporaryFileStream::isWriteFinished() const -{ - assert(in_reader == nullptr || out_writer == nullptr); - return out_writer == nullptr; -} - -Block TemporaryFileStream::read() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return {}; - - if (!in_reader) - { - in_reader = std::make_unique(getPath(), header, getSize()); - } - - Block block = in_reader->read(); - if (!block) - { - /// finalize earlier to release resources, do not wait for the destructor - this->release(); - } - return block; -} - -std::unique_ptr TemporaryFileStream::getReadStream() -{ - if (!isWriteFinished()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Writing has been not finished"); - - if (isEof()) - return nullptr; - - return std::make_unique(getPath(), header, getSize()); -} - -void 
TemporaryFileStream::updateAllocAndCheck() -{ - assert(out_writer); - size_t new_compressed_size = out_writer->out_compressed_buf.getCompressedBytes(); - size_t new_uncompressed_size = out_writer->out_compressed_buf.getUncompressedBytes(); - - if (unlikely(new_compressed_size < stat.compressed_size || new_uncompressed_size < stat.uncompressed_size)) - { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Temporary file {} size decreased after write: compressed: {} -> {}, uncompressed: {} -> {}", - getPath(), new_compressed_size, stat.compressed_size, new_uncompressed_size, stat.uncompressed_size); - } - - parent->deltaAllocAndCheck(new_compressed_size - stat.compressed_size, new_uncompressed_size - stat.uncompressed_size); - stat.compressed_size = new_compressed_size; - stat.uncompressed_size = new_uncompressed_size; - stat.num_rows = out_writer->num_rows; -} - -bool TemporaryFileStream::isEof() const -{ - return file == nullptr && !segment_holder; -} - -void TemporaryFileStream::release() -{ - if (in_reader) - in_reader.reset(); - - if (out_writer) - { - out_writer->finalize(); - out_writer.reset(); - } - - if (file) - { - file.reset(); - parent->deltaAllocAndCheck(-stat.compressed_size, -stat.uncompressed_size); - } - - if (segment_holder) - segment_holder.reset(); -} - -String TemporaryFileStream::getPath() const -{ - if (file) - return file->getAbsolutePath(); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getPath(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -size_t TemporaryFileStream::getSize() const -{ - if (file) - return file->getDisk()->getFileSize(file->getRelativePath()); - if (segment_holder && !segment_holder->empty()) - return segment_holder->front().getReservedSize(); - - throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryFileStream has no file"); -} - -TemporaryFileStream::~TemporaryFileStream() -{ - try - { - release(); - } - catch (...) - { - tryLogCurrentException(__PRETTY_FUNCTION__); - assert(false); /// deltaAllocAndCheck with negative can't throw exception - } + if (!holder) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary block stream is not initialized"); + return TemporaryBlockStreamReaderHolder(holder->read(), header, DBMS_TCP_PROTOCOL_VERSION); } } diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index d541c93e031..f8d14b00ac5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -4,15 +4,21 @@ #include #include -#include +#include #include -#include -#include +#include + #include #include -#include -#include +#include +#include + +#include + +#include + +class FileCacheTest_TemporaryDataReadBufferSize_Test; namespace CurrentMetrics { @@ -25,11 +31,10 @@ namespace DB class TemporaryDataOnDiskScope; using TemporaryDataOnDiskScopePtr = std::shared_ptr; -class TemporaryDataOnDisk; -using TemporaryDataOnDiskPtr = std::unique_ptr; +class TemporaryDataBuffer; +using TemporaryDataBufferPtr = std::unique_ptr; -class TemporaryFileStream; -using TemporaryFileStreamPtr = std::unique_ptr; +class TemporaryFileHolder; class FileCache; @@ -40,15 +45,26 @@ struct TemporaryDataOnDiskSettings /// Compression codec for temporary data, if empty no compression will be used. 
LZ4 by default String compression_codec = "LZ4"; + + /// Read/Write internal buffer size + size_t buffer_size = DBMS_DEFAULT_BUFFER_SIZE; + + /// Metrics counter to increment when temporary file in current scope are created + CurrentMetrics::Metric current_metric = CurrentMetrics::TemporaryFilesUnknown; }; +/// Creates temporary files located on specified resource (disk, fs_cache, etc.) +using TemporaryFileProvider = std::function(size_t)>; +TemporaryFileProvider createTemporaryFileProvider(VolumePtr volume); +TemporaryFileProvider createTemporaryFileProvider(FileCache * file_cache); + /* * Used to account amount of temporary data written to disk. * If limit is set, throws exception if limit is exceeded. * Data can be nested, so parent scope accounts all data written by children. * Scopes are: global -> per-user -> per-query -> per-purpose (sorting, aggregation, etc). */ -class TemporaryDataOnDiskScope : boost::noncopyable +class TemporaryDataOnDiskScope : boost::noncopyable, public std::enable_shared_from_this { public: struct StatAtomic @@ -57,164 +73,155 @@ public: std::atomic uncompressed_size; }; - explicit TemporaryDataOnDiskScope(VolumePtr volume_, TemporaryDataOnDiskSettings settings_) - : volume(std::move(volume_)) + /// Root scope + template + TemporaryDataOnDiskScope(T && storage, TemporaryDataOnDiskSettings settings_) + : file_provider(createTemporaryFileProvider(std::forward(storage))) , settings(std::move(settings_)) {} - explicit TemporaryDataOnDiskScope(VolumePtr volume_, FileCache * file_cache_, TemporaryDataOnDiskSettings settings_) - : volume(std::move(volume_)) - , file_cache(file_cache_) - , settings(std::move(settings_)) - {} - explicit TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_) + TemporaryDataOnDiskScope(TemporaryDataOnDiskScopePtr parent_, TemporaryDataOnDiskSettings settings_) : parent(std::move(parent_)) - , volume(parent->volume) - , file_cache(parent->file_cache) + , file_provider(parent->file_provider) , settings(std::move(settings_)) {} - /// TODO: remove - /// Refactor all code that uses volume directly to use TemporaryDataOnDisk. - VolumePtr getVolume() const { return volume; } + TemporaryDataOnDiskScopePtr childScope(CurrentMetrics::Metric current_metric); const TemporaryDataOnDiskSettings & getSettings() const { return settings; } - protected: + friend class TemporaryDataBuffer; + void deltaAllocAndCheck(ssize_t compressed_delta, ssize_t uncompressed_delta); TemporaryDataOnDiskScopePtr parent = nullptr; - VolumePtr volume = nullptr; - FileCache * file_cache = nullptr; + TemporaryFileProvider file_provider; StatAtomic stat; const TemporaryDataOnDiskSettings settings; }; -/* - * Holds the set of temporary files. - * New file stream is created with `createStream`. - * Streams are owned by this object and will be deleted when it is deleted. - * It's a leaf node in temporary data scope tree. - */ -class TemporaryDataOnDisk : private TemporaryDataOnDiskScope +/** Used to hold the wrapper and wrapped object together. + * This class provides a convenient way to manage the lifetime of both the wrapper and the wrapped object. + * The wrapper class (Impl) stores a reference to the wrapped object (Holder), and both objects are owned by this class. + * The lifetime of the wrapper and the wrapped object should be the same. + * This pattern is commonly used when the caller only needs to interact with the wrapper and doesn't need to be aware of the wrapped object. 
+ * Examples: CompressedWriteBuffer and WriteBuffer, and NativeReader and ReadBuffer. + */ +template +class WrapperGuard { - friend class TemporaryFileStream; /// to allow it to call `deltaAllocAndCheck` to account data - public: - using TemporaryDataOnDiskScope::StatAtomic; + WrapperGuard() = default; - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_); + template + WrapperGuard(std::unique_ptr holder_, Args && ... args) + : holder(std::move(holder_)) + , impl(std::make_unique(*holder, std::forward(args)...)) + {} - explicit TemporaryDataOnDisk(TemporaryDataOnDiskScopePtr parent_, CurrentMetrics::Metric metric_scope); + Impl * operator->() { return impl.get(); } + const Impl * operator->() const { return impl.get(); } + Impl & operator*() { return *impl; } + const Impl & operator*() const { return *impl; } + operator bool() const { return impl != nullptr; } - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - TemporaryFileStream & createStream(const Block & header, size_t max_file_size = 0); + const Holder * getHolder() const { return holder.get(); } + Holder * getHolder() { return holder.get(); } - /// Write raw data directly into buffer. - /// Differences from `createStream`: - /// 1) it doesn't account data in parent scope - /// 2) returned buffer owns resources (instead of TemporaryDataOnDisk itself) - /// If max_file_size > 0, then check that there's enough space on the disk and throw an exception in case of lack of free space - std::unique_ptr createRawStream(size_t max_file_size = 0); + void reset() + { + impl.reset(); + holder.reset(); + } - std::vector getStreams() const; - bool empty() const; - - const StatAtomic & getStat() const { return stat; } - -private: - FileSegmentsHolderPtr createCacheFile(size_t max_file_size); - TemporaryFileOnDiskHolder createRegularFile(size_t max_file_size); - - mutable std::mutex mutex; - std::vector streams TSA_GUARDED_BY(mutex); - - typename CurrentMetrics::Metric current_metric_scope = CurrentMetrics::TemporaryFilesUnknown; +protected: + std::unique_ptr holder; + std::unique_ptr impl; }; -/* - * Data can be written into this stream and then read. - * After finish writing, call `finishWriting` and then either call `read` or 'getReadStream'(only one of the two) to read the data. - * Account amount of data written to disk in parent scope. - */ -class TemporaryFileStream : boost::noncopyable +/// Owns temporary file and provides access to it. +/// On destruction, file is removed and all resources are freed. +/// Lifetime of read/write buffers should be less than lifetime of TemporaryFileHolder. 
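For illustration only, a minimal sketch of the wrapper/holder pattern described above, tying a NativeReader to the ReadBuffer it reads from. The helpers makeExampleReader and drainExampleReader are hypothetical; only WrapperGuard, NativeReader, ReadBuffer, Block and DBMS_TCP_PROTOCOL_VERSION are taken from the surrounding code, so treat this as a sketch of the intended usage rather than code from the patch:

// A reader guard analogous to TemporaryBlockStreamReaderHolder: Holder = ReadBuffer, Impl = NativeReader.
using ExampleReaderGuard = WrapperGuard<NativeReader, ReadBuffer>;

// The guard takes ownership of the buffer and constructs NativeReader(*buffer, header, revision),
// so the wrapped buffer cannot be destroyed while the reader still references it.
ExampleReaderGuard makeExampleReader(std::unique_ptr<ReadBuffer> raw_in, const Block & header)
{
    return ExampleReaderGuard(std::move(raw_in), header, DBMS_TCP_PROTOCOL_VERSION);
}

void drainExampleReader(ExampleReaderGuard reader)
{
    while (Block block = reader->read())   // operator-> forwards to the wrapped NativeReader
    {
        // process the block ...
    }
}   // members are destroyed in reverse declaration order: impl first, then holder, matching reset()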
+class TemporaryFileHolder { public: - struct Reader - { - Reader(const String & path, const Block & header_, size_t size = 0); + TemporaryFileHolder(); - explicit Reader(const String & path, size_t size = 0); + virtual std::unique_ptr write() = 0; + virtual std::unique_ptr read(size_t buffer_size) const = 0; - Block read(); + /// Get location for logging purposes + virtual String describeFilePath() const = 0; - const std::string path; - const size_t size; - const std::optional header; + virtual ~TemporaryFileHolder() = default; +}; - std::unique_ptr in_file_buf; - std::unique_ptr in_compressed_buf; - std::unique_ptr in_reader; - }; +class TemporaryDataReadBuffer : public ReadBuffer +{ +public: + explicit TemporaryDataReadBuffer(std::unique_ptr in_); + +private: + friend class ::FileCacheTest_TemporaryDataReadBufferSize_Test; + + bool nextImpl() override; + + WrapperGuard compressed_buf; +}; + +/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +class TemporaryDataBuffer : public WriteBuffer +{ +public: struct Stat { - /// Statistics for file - /// Non-atomic because we don't allow to `read` or `write` into single file from multiple threads size_t compressed_size = 0; size_t uncompressed_size = 0; - size_t num_rows = 0; }; - TemporaryFileStream(TemporaryFileOnDiskHolder file_, const Block & header_, TemporaryDataOnDisk * parent_); - TemporaryFileStream(FileSegmentsHolderPtr segments_, const Block & header_, TemporaryDataOnDisk * parent_); - - size_t write(const Block & block); - void flush(); + explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + void nextImpl() override; + void finalizeImpl() override; + void cancelImpl() noexcept override; + std::unique_ptr read(); Stat finishWriting(); - Stat finishWritingAsyncSafe(); - bool isWriteFinished() const; - std::unique_ptr getReadStream(); + String describeFilePath() const; - Block read(); - - String getPath() const; - size_t getSize() const; - - Block getHeader() const { return header; } - - /// Read finished and file released - bool isEof() const; - - ~TemporaryFileStream(); + ~TemporaryDataBuffer() override; private: void updateAllocAndCheck(); - /// Release everything, close reader and writer, delete file - void release(); - - TemporaryDataOnDisk * parent; - - Block header; - - /// Data can be stored in file directly or in the cache - TemporaryFileOnDiskHolder file; - FileSegmentsHolderPtr segment_holder; + TemporaryDataOnDiskScope * parent; + std::unique_ptr file_holder; + WrapperGuard out_compressed_buf; + std::once_flag write_finished; Stat stat; +}; - std::once_flag finish_writing; +using TemporaryBlockStreamReaderHolder = WrapperGuard; - struct OutputWriter; - std::unique_ptr out_writer; +class TemporaryBlockStreamHolder : public WrapperGuard +{ +public: + TemporaryBlockStreamHolder() = default; - std::unique_ptr in_reader; + TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + + TemporaryBlockStreamReaderHolder getReadStream() const; + + TemporaryDataBuffer::Stat finishWriting() const; + const Block & getHeader() const { return header; } + +private: + Block header; }; } diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 007b31d9fdc..ae45443d4bd 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -934,7 +934,7 @@ static Block generateBlock(size_t size = 0) return block; 
} -static size_t readAllTemporaryData(TemporaryFileStream & stream) +static size_t readAllTemporaryData(NativeReader & stream) { Block block; size_t read_rows = 0; @@ -947,6 +947,7 @@ static size_t readAllTemporaryData(TemporaryFileStream & stream) } TEST_F(FileCacheTest, temporaryData) +try { ServerUUID::setRandomForUnitTests(); DB::FileCacheSettings settings; @@ -959,7 +960,7 @@ TEST_F(FileCacheTest, temporaryData) file_cache.initialize(); const auto user = FileCache::getCommonUser(); - auto tmp_data_scope = std::make_shared(nullptr, &file_cache, TemporaryDataOnDiskSettings{}); + auto tmp_data_scope = std::make_shared(&file_cache, TemporaryDataOnDiskSettings{}); auto some_data_holder = file_cache.getOrSet(FileCacheKey::fromPath("some_data"), 0, 5_KiB, 5_KiB, CreateFileSegmentSettings{}, 0, user); @@ -982,12 +983,17 @@ TEST_F(FileCacheTest, temporaryData) size_t size_used_with_temporary_data; size_t segments_used_with_temporary_data; + + { - auto tmp_data = std::make_unique(tmp_data_scope); + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); + ASSERT_TRUE(stream); + /// Do nothing with the stream, just create it and destroy it. + } - auto & stream = tmp_data->createStream(generateBlock()); - - ASSERT_GT(stream.write(generateBlock(100)), 0); + { + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); + ASSERT_GT(stream->write(generateBlock(100)), 0); ASSERT_GT(file_cache.getUsedCacheSize(), 0); ASSERT_GT(file_cache.getFileSegmentsNum(), 0); @@ -995,22 +1001,22 @@ TEST_F(FileCacheTest, temporaryData) size_t used_size_before_attempt = file_cache.getUsedCacheSize(); /// data can't be evicted because it is still held by `some_data_holder` ASSERT_THROW({ - stream.write(generateBlock(2000)); - stream.flush(); + stream->write(generateBlock(2000)); + stream.finishWriting(); }, DB::Exception); + ASSERT_THROW(stream.finishWriting(), DB::Exception); + ASSERT_EQ(file_cache.getUsedCacheSize(), used_size_before_attempt); } { size_t before_used_size = file_cache.getUsedCacheSize(); - auto tmp_data = std::make_unique(tmp_data_scope); - - auto write_buf_stream = tmp_data->createRawStream(); + auto write_buf_stream = std::make_unique(tmp_data_scope.get()); write_buf_stream->write("1234567890", 10); write_buf_stream->write("abcde", 5); - auto read_buf = dynamic_cast(write_buf_stream.get())->tryGetReadBuffer(); + auto read_buf = write_buf_stream->read(); ASSERT_GT(file_cache.getUsedCacheSize(), before_used_size + 10); @@ -1023,22 +1029,22 @@ TEST_F(FileCacheTest, temporaryData) } { - auto tmp_data = std::make_unique(tmp_data_scope); - auto & stream = tmp_data->createStream(generateBlock()); + TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); - ASSERT_GT(stream.write(generateBlock(100)), 0); + ASSERT_GT(stream->write(generateBlock(100)), 0); some_data_holder.reset(); - stream.write(generateBlock(2000)); + stream->write(generateBlock(2000)); - auto stat = stream.finishWriting(); + stream.finishWriting(); - ASSERT_TRUE(fs::exists(stream.getPath())); - ASSERT_GT(fs::file_size(stream.getPath()), 100); + String file_path = stream.getHolder()->describeFilePath().substr(strlen("fscache://")); - ASSERT_EQ(stat.num_rows, 2100); - ASSERT_EQ(readAllTemporaryData(stream), 2100); + ASSERT_TRUE(fs::exists(file_path)) << "File " << file_path << " should exist"; + ASSERT_GT(fs::file_size(file_path), 100) << "File " << file_path << " should be larger than 100 bytes"; + + ASSERT_EQ(readAllTemporaryData(*stream.getReadStream()), 2100); size_used_with_temporary_data =
file_cache.getUsedCacheSize(); segments_used_with_temporary_data = file_cache.getFileSegmentsNum(); @@ -1054,6 +1060,11 @@ TEST_F(FileCacheTest, temporaryData) ASSERT_LE(file_cache.getUsedCacheSize(), size_used_before_temporary_data); ASSERT_LE(file_cache.getFileSegmentsNum(), segments_used_before_temporary_data); } +catch (...) +{ + std::cerr << getCurrentExceptionMessage(true) << std::endl; + throw; +} TEST_F(FileCacheTest, CachedReadBuffer) { @@ -1148,18 +1159,22 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) DB::FileCache file_cache("cache", settings); file_cache.initialize(); - auto tmp_data_scope = std::make_shared(/*volume=*/nullptr, &file_cache, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(&file_cache, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); - /// We allocate buffer of size min(getSize(), DBMS_DEFAULT_BUFFER_SIZE) + stream->write(block); + auto stat = stream.finishWriting(); + + /// We allocate buffer of size min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE) /// We do care about buffer size because realistic external group by could generate 10^5 temporary files - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); + + auto reader = stream.getReadStream(); + auto * read_buf = reader.getHolder(); + const auto & internal_buffer = static_cast(read_buf)->compressed_buf.getHolder()->internalBuffer(); + ASSERT_EQ(internal_buffer.size(), 62); } /// Temporary data stored on disk @@ -1170,16 +1185,14 @@ TEST_F(FileCacheTest, TemporaryDataReadBufferSize) disk = createDisk("temporary_data_read_buffer_size_test_dir"); VolumePtr volume = std::make_shared("volume", disk); - auto tmp_data_scope = std::make_shared(/*volume=*/volume, /*cache=*/nullptr, /*settings=*/TemporaryDataOnDiskSettings{}); - - auto tmp_data = std::make_unique(tmp_data_scope); + auto tmp_data_scope = std::make_shared(volume, TemporaryDataOnDiskSettings{}); auto block = generateBlock(/*size=*/3); - auto & stream = tmp_data->createStream(block); - stream.write(block); - stream.finishWriting(); + TemporaryBlockStreamHolder stream(block, tmp_data_scope.get()); + stream->write(block); + auto stat = stream.finishWriting(); - ASSERT_EQ(stream.getSize(), 62); + ASSERT_EQ(stat.compressed_size, 62); } } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 07ee8f4ddef..1560e88ffef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -29,17 +30,18 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - out_row_sources_buf_, + temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) + , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git 
a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index 99fd95d82d9..b7bb9914cf8 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,6 +11,8 @@ namespace Poco namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -35,7 +37,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -62,6 +64,8 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. + std::shared_ptr temp_data_buffer = nullptr; + LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 3a9cf7ee141..d4e4ba6aa5f 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB { @@ -15,7 +16,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index c889668a38e..fc300e41026 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,6 +9,8 @@ namespace DB { +class TemporaryDataBuffer; + /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -21,7 +23,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -45,7 +47,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - WriteBuffer * out_row_sources_buf = nullptr; + std::shared_ptr out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index cd347d371d9..a3a33080f52 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB { @@ -37,12 +38,13 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) + , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index 2f23f2a5c4d..d3b9837a253 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,6 +24,8 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -59,6 +61,8 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. + std::shared_ptr temp_data_buffer = nullptr; + /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 9f124c6ba18..1ceb1f46234 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { @@ -14,12 +15,13 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr temp_data_buffer_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) + , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index e6d20ddac75..6f877459147 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,6 +8,8 @@ namespace DB { +class TemporaryDataBuffer; + /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -22,7 +24,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -37,6 +39,8 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; + std::shared_ptr temp_data_buffer = nullptr; + std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 99fb700abf1..9b09c802783 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index d2895a2a2e9..13330dcff6d 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - WriteBuffer * out_row_sources_buf_, + std::shared_ptr out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 6e52450efa7..fb8e5ce74e3 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index dc262aab9ee..a9d9f4fb619 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 32b5d7bf343..0bdccd4795d 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - WriteBuffer * out_row_sources_buf_ = nullptr, + std::shared_ptr temp_data_buffer_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - out_row_sources_buf_, + temp_data_buffer_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index d99f9a7d1f1..1c03a4d74cd 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,7 +20,6 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; - TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..4fde246f764 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -280,9 +280,9 @@ void SortingStep::mergeSorting( if (increase_sort_description_compile_attempts) increase_sort_description_compile_attempts = false; - auto tmp_data_on_disk = sort_settings.tmp_data - ? 
std::make_unique(sort_settings.tmp_data, CurrentMetrics::TemporaryFilesForSort) - : std::unique_ptr(); + TemporaryDataOnDiskScopePtr tmp_data_on_disk = nullptr; + if (sort_settings.tmp_data) + tmp_data_on_disk = sort_settings.tmp_data->childScope(CurrentMetrics::TemporaryFilesForSort); return std::make_shared( header, diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 68f23898018..2c54788b995 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -54,9 +54,9 @@ namespace class SourceFromNativeStream : public ISource { public: - explicit SourceFromNativeStream(TemporaryFileStream * tmp_stream_) - : ISource(tmp_stream_->getHeader()) - , tmp_stream(tmp_stream_) + explicit SourceFromNativeStream(const Block & header, TemporaryBlockStreamReaderHolder tmp_stream_) + : ISource(header) + , tmp_stream(std::move(tmp_stream_)) {} String getName() const override { return "SourceFromNativeStream"; } @@ -69,7 +69,7 @@ namespace auto block = tmp_stream->read(); if (!block) { - tmp_stream = nullptr; + tmp_stream.reset(); return {}; } return convertToChunk(block); @@ -78,7 +78,7 @@ namespace std::optional getReadProgress() override { return std::nullopt; } private: - TemporaryFileStream * tmp_stream; + TemporaryBlockStreamReaderHolder tmp_stream; }; } @@ -811,15 +811,18 @@ void AggregatingTransform::initGenerate() Pipes pipes; /// Merge external data from all aggregators used in query. - for (const auto & aggregator : *params->aggregator_list_ptr) + for (auto & aggregator : *params->aggregator_list_ptr) { - const auto & tmp_data = aggregator.getTemporaryData(); - for (auto * tmp_stream : tmp_data.getStreams()) - pipes.emplace_back(Pipe(std::make_unique(tmp_stream))); + auto & tmp_data = aggregator.getTemporaryData(); + num_streams += tmp_data.size(); - num_streams += tmp_data.getStreams().size(); - compressed_size += tmp_data.getStat().compressed_size; - uncompressed_size += tmp_data.getStat().uncompressed_size; + for (auto & tmp_stream : tmp_data) + { + auto stat = tmp_stream.finishWriting(); + compressed_size += stat.compressed_size; + uncompressed_size += stat.uncompressed_size; + pipes.emplace_back(Pipe(std::make_unique(tmp_stream.getHeader(), tmp_stream.getReadStream()))); + } } LOG_DEBUG( diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index c45192e7118..ba157dabffb 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -27,15 +27,20 @@ namespace ProfileEvents namespace DB { +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + class BufferingToFileTransform : public IAccumulatingTransform { public: - BufferingToFileTransform(const Block & header, TemporaryFileStream & tmp_stream_, LoggerPtr log_) + BufferingToFileTransform(const Block & header, TemporaryBlockStreamHolder tmp_stream_, LoggerPtr log_) : IAccumulatingTransform(header, header) - , tmp_stream(tmp_stream_) + , tmp_stream(std::move(tmp_stream_)) , log(log_) { - LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getPath()); + LOG_INFO(log, "Sorting and writing part of data into temporary file {}", tmp_stream.getHolder()->describeFilePath()); ProfileEvents::increment(ProfileEvents::ExternalSortWritePart); } @@ -44,14 +49,15 @@ public: void consume(Chunk chunk) override { Block block = 
getInputPort().getHeader().cloneWithColumns(chunk.detachColumns()); - tmp_stream.write(block); + tmp_stream->write(block); } Chunk generate() override { - if (!tmp_stream.isWriteFinished()) + if (!tmp_read_stream) { auto stat = tmp_stream.finishWriting(); + tmp_read_stream = tmp_stream.getReadStream(); ProfileEvents::increment(ProfileEvents::ExternalProcessingCompressedBytesTotal, stat.compressed_size); ProfileEvents::increment(ProfileEvents::ExternalProcessingUncompressedBytesTotal, stat.uncompressed_size); @@ -59,10 +65,11 @@ public: ProfileEvents::increment(ProfileEvents::ExternalSortUncompressedBytes, stat.uncompressed_size); LOG_INFO(log, "Done writing part of data into temporary file {}, compressed {}, uncompressed {} ", - tmp_stream.getPath(), ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); + tmp_stream.getHolder()->describeFilePath(), + ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_stream.read(); + Block block = tmp_read_stream->read(); if (!block) return {}; @@ -71,7 +78,8 @@ public: } private: - TemporaryFileStream & tmp_stream; + TemporaryBlockStreamHolder tmp_stream; + TemporaryBlockStreamReaderHolder tmp_read_stream; LoggerPtr log; }; @@ -86,7 +94,7 @@ MergeSortingTransform::MergeSortingTransform( size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_) : SortingTransform(header, description_, max_merged_block_size_, limit_, increase_sort_description_compile_attempts) , max_bytes_before_remerge(max_bytes_before_remerge_) @@ -168,9 +176,13 @@ void MergeSortingTransform::consume(Chunk chunk) */ if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort) { + if (!tmp_data) + throw Exception(ErrorCodes::LOGICAL_ERROR, "TemporaryDataOnDisk is not set for MergeSortingTransform"); + temporary_files_num++; + /// If there's less free disk space than reserve_size, an exception will be thrown size_t reserve_size = sum_bytes_in_blocks + min_free_disk_space; - auto & tmp_stream = tmp_data->createStream(header_without_constants, reserve_size); + TemporaryBlockStreamHolder tmp_stream(header_without_constants, tmp_data.get(), reserve_size); size_t max_merged_block_size = this->max_merged_block_size; if (max_block_bytes > 0 && sum_rows_in_blocks > 0 && sum_bytes_in_blocks > 0) { @@ -179,7 +191,7 @@ void MergeSortingTransform::consume(Chunk chunk) max_merged_block_size = std::max(std::min(max_merged_block_size, max_block_bytes / avg_row_bytes), 128UL); } merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); - auto current_processor = std::make_shared(header_without_constants, tmp_stream, log); + auto current_processor = std::make_shared(header_without_constants, std::move(tmp_stream), log); processors.emplace_back(current_processor); @@ -221,14 +233,14 @@ void MergeSortingTransform::generate() { if (!generated_prefix) { - size_t num_tmp_files = tmp_data ? 
tmp_data->getStreams().size() : 0; - if (num_tmp_files == 0) - merge_sorter - = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + if (temporary_files_num == 0) + { + merge_sorter = std::make_unique(header_without_constants, std::move(chunks), description, max_merged_block_size, limit); + } else { ProfileEvents::increment(ProfileEvents::ExternalSortMerge); - LOG_INFO(log, "There are {} temporary sorted parts to merge", num_tmp_files); + LOG_INFO(log, "There are {} temporary sorted parts to merge", temporary_files_num); processors.emplace_back(std::make_shared( header_without_constants, std::move(chunks), description, max_merged_block_size, limit)); diff --git a/src/Processors/Transforms/MergeSortingTransform.h b/src/Processors/Transforms/MergeSortingTransform.h index a39dd66caa0..f7cb63d518b 100644 --- a/src/Processors/Transforms/MergeSortingTransform.h +++ b/src/Processors/Transforms/MergeSortingTransform.h @@ -29,7 +29,7 @@ public: size_t max_bytes_before_remerge_, double remerge_lowered_memory_bytes_ratio_, size_t max_bytes_before_external_sort_, - TemporaryDataOnDiskPtr tmp_data_, + TemporaryDataOnDiskScopePtr tmp_data_, size_t min_free_disk_space_); String getName() const override { return "MergeSortingTransform"; } @@ -45,7 +45,8 @@ private: size_t max_bytes_before_remerge; double remerge_lowered_memory_bytes_ratio; size_t max_bytes_before_external_sort; - TemporaryDataOnDiskPtr tmp_data; + TemporaryDataOnDiskScopePtr tmp_data; + size_t temporary_files_num = 0; size_t min_free_disk_space; size_t max_block_bytes; diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index a9e5b1535c0..1e274a97a08 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,6 +197,12 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } + template + void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) + { + (resources.*field).push_back(std::move(resource)); + } + /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h index 10f7f39ab09..ee2ecc25cd5 100644 --- a/src/QueryPipeline/QueryPlanResourceHolder.h +++ b/src/QueryPipeline/QueryPlanResourceHolder.h @@ -13,6 +13,7 @@ class QueryPlan; class Context; struct QueryIdHolder; +class TemporaryDataBuffer; struct QueryPlanResourceHolder { @@ -33,6 +34,7 @@ struct QueryPlanResourceHolder std::vector storage_holders; std::vector table_locks; std::vector> query_id_holders; + std::vector> rows_sources_temporary_file; }; } diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index 8a9ae05b355..52b56860543 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -162,15 +162,16 @@ void HTTPHandler::pushDelayedResults(Output & used_output) for (auto & write_buf : write_buffers) { - if (!write_buf) - continue; - - IReadableWriteBuffer * write_buf_concrete = dynamic_cast(write_buf.get()); - if (write_buf_concrete) + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) { - ReadBufferPtr reread_buf = write_buf_concrete->tryGetReadBuffer(); - if (reread_buf) - read_buffers.emplace_back(wrapReadBufferPointer(reread_buf)); + if (auto reread_buf = write_buf_concrete->read()) + read_buffers.emplace_back(std::move(reread_buf)); + } + + if (auto * write_buf_concrete = dynamic_cast(write_buf.get())) + { + if (auto reread_buf = write_buf_concrete->tryGetReadBuffer()) + read_buffers.emplace_back(std::move(reread_buf)); } } @@ -312,21 +313,19 @@ void HTTPHandler::processQuery( if (buffer_size_memory > 0 || buffer_until_eof) { - CascadeWriteBuffer::WriteBufferPtrs cascade_buffer1; - CascadeWriteBuffer::WriteBufferConstructors cascade_buffer2; + CascadeWriteBuffer::WriteBufferPtrs cascade_buffers; + CascadeWriteBuffer::WriteBufferConstructors cascade_buffers_lazy; if (buffer_size_memory > 0) - cascade_buffer1.emplace_back(std::make_shared(buffer_size_memory)); + cascade_buffers.emplace_back(std::make_shared(buffer_size_memory)); if (buffer_until_eof) { - auto tmp_data = std::make_shared(server.context()->getTempDataOnDisk()); - - auto create_tmp_disk_buffer = [tmp_data] (const WriteBufferPtr &) -> WriteBufferPtr { - return tmp_data->createRawStream(); - }; - - cascade_buffer2.emplace_back(std::move(create_tmp_disk_buffer)); + auto tmp_data = server.context()->getTempDataOnDisk(); + cascade_buffers_lazy.emplace_back([tmp_data](const WriteBufferPtr &) -> WriteBufferPtr + { + return std::make_unique(tmp_data.get()); + }); } else { @@ -342,10 +341,10 @@ void HTTPHandler::processQuery( return next_buffer; }; - cascade_buffer2.emplace_back(push_memory_buffer_and_continue); + cascade_buffers_lazy.emplace_back(push_memory_buffer_and_continue); } - used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffer1), std::move(cascade_buffer2)); + used_output.out_delayed_and_compressed_holder = std::make_unique(std::move(cascade_buffers), std::move(cascade_buffers_lazy)); used_output.out_maybe_delayed_and_compressed = used_output.out_delayed_and_compressed_holder.get(); } else diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 74d6d60ba1b..5c9d4ea61a2 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -118,68 +118,6 @@ static ColumnsStatistics getStatisticsForColumns( return 
all_statistics; } -/// Manages the "rows_sources" temporary file that is used during vertical merge. -class RowsSourcesTemporaryFile : public ITemporaryFileLookup -{ -public: - /// A logical name of the temporary file under which it will be known to the plan steps that use it. - static constexpr auto FILE_ID = "rows_sources"; - - explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_) - : tmp_disk(std::make_unique(temporary_data_on_disk_)) - , uncompressed_write_buffer(tmp_disk->createRawStream()) - , tmp_file_name_on_disk(uncompressed_write_buffer->getFileName()) - { - } - - WriteBuffer & getTemporaryFileForWriting(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (write_buffer) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there musto be only one writer"); - - write_buffer = (std::make_unique(*uncompressed_write_buffer)); - return *write_buffer; - } - - std::unique_ptr getTemporaryFileForReading(const String & name) override - { - if (name != FILE_ID) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); - - if (!finalized) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet"); - - /// tmp_disk might not create real file if no data was written to it. - if (final_size == 0) - return std::make_unique(); - - /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning. - auto raw_file_read_buffer = std::make_unique(tmp_file_name_on_disk); - return std::make_unique(std::move(raw_file_read_buffer)); - } - - /// Returns written data size in bytes - size_t finalizeWriting() - { - write_buffer->finalize(); - uncompressed_write_buffer->finalize(); - finalized = true; - final_size = write_buffer->count(); - return final_size; - } - -private: - std::unique_ptr tmp_disk; - std::unique_ptr uncompressed_write_buffer; - std::unique_ptr write_buffer; - const String tmp_file_name_on_disk; - bool finalized = false; - size_t final_size = 0; -}; - static void addMissedColumnsToSerializationInfos( size_t num_rows_in_parts, const Names & part_columns, @@ -480,7 +418,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } case MergeAlgorithm::Vertical: { - ctx->rows_sources_temporary_file = std::make_shared(global_ctx->context->getTempDataOnDisk()); + ctx->rows_sources_temporary_file = std::make_unique(global_ctx->context->getTempDataOnDisk().get()); std::map local_merged_column_to_size; for (const auto & part : global_ctx->future_part->parts) @@ -854,22 +792,11 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; - size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; - size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); /// Ensure data has written to disk. - size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting(); - /// In special case, when there is only one source part, and no rows were skipped, we may have - /// skipped writing rows_sources file. 
Otherwise rows_sources_count must be equal to the total - /// number of input rows. - if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number " - "of bytes written to rows_sources file ({}). It is a bug.", - sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->rows_sources_temporary_file->finishWriting(); ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -901,12 +828,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - const String & rows_sources_temporary_file_name_, + std::unique_ptr rows_sources_read_buf_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_read_buf(std::move(rows_sources_read_buf_)) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -914,15 +841,13 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { - const auto &header = pipeline.getHeader(); + const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - - auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); + if (!rows_sources_read_buf) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); auto transform = std::make_unique( header, @@ -957,7 +882,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::unique_ptr rows_sources_read_buf; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -1008,7 +933,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - RowsSourcesTemporaryFile::FILE_ID, + ctx->rows_sources_temporary_file->read(), (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -1037,9 +962,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); + builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return 
{QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1401,7 +1326,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - const String & rows_sources_temporary_file_name_, + std::shared_ptr rows_sources_temporary_file_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1411,7 +1336,7 @@ public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) + , rows_sources_temporary_file(rows_sources_temporary_file_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1421,7 +1346,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1431,14 +1356,6 @@ public: const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - WriteBuffer * rows_sources_write_buf = nullptr; - if (!rows_sources_temporary_file_name.empty()) - { - if (!pipeline_settings.temporary_file_lookup) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); - rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); - } - switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1451,14 +1368,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_write_buf, + rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1473,7 +1390,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, cleanup); break; @@ -1486,7 +1403,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, 
blocks_are_granules_size); break; } @@ -1528,7 +1445,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - const String rows_sources_temporary_file_name; + std::shared_ptr rows_sources_temporary_file; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1697,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporaty file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1762,7 +1679,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); - pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 5a4fb1ec0b8..a3d72127627 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -41,7 +41,6 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; -class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -235,7 +234,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -314,7 +313,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 76bcf41d6d8..a53d4213cbd 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -113,10 +113,11 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", + LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", + prewhere_actions.dumpConditions(), prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } From db2aab199db6e542c5a87c30466c358a2207c30a Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 11:57:11 +0000 Subject: [PATCH 04/74] log --- src/Interpreters/TemporaryDataOnDisk.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c3b24fb783b..6cc49fe83c8 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -58,6 +58,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -91,6 +92,7 @@ public: explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file '{}'", path_to_file); if (max_file_size > 0) { auto reservation = volume->reserve(max_file_size); @@ -129,9 +131,14 @@ public: try { if (disk->exists(path_to_file)) + { + LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); + } else + { LOG_WARNING(getLogger("TemporaryFileOnLocalDisk"), "Temporary path '{}' does not exist in '{}' on disk {}", path_to_file, disk->getPath(), disk->getName()); + } } catch (...) { From b09d3c5479edcecaa041df6e6de7a45a2d407aa8 Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 16 Oct 2024 12:01:21 +0000 Subject: [PATCH 05/74] fix --- src/Interpreters/Context.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 6ada12e63f9..f0e29dcdc41 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -1256,6 +1256,10 @@ try /// We skip directories (for example, 'http_buffers' - it's used for buffering of the results) and all other file types. } } + else + { + fs::create_directories(path); + } } catch (...) 
{ From f238530cc5d222f62611214a9434138d79aabefd Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:10:39 +0000 Subject: [PATCH 06/74] w --- src/Interpreters/Aggregator.cpp | 5 ++++ src/Interpreters/Aggregator.h | 2 +- src/Interpreters/GraceHashJoin.cpp | 4 +-- src/Interpreters/HashJoin/HashJoin.cpp | 15 +++++------ src/Interpreters/HashJoin/HashJoin.h | 2 +- src/Interpreters/TemporaryDataOnDisk.h | 25 ++++++++++--------- .../Transforms/MergeSortingTransform.cpp | 4 +-- src/Storages/MergeTree/MergeTask.cpp | 2 +- 8 files changed, 33 insertions(+), 26 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index e6fecc37cfa..cdc819d3a32 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1639,6 +1639,11 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } +std::vector & Aggregator::getTemporaryData() +{ + return tmp_files; +} + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index bc28d3dccb8..3ac5ca30ed4 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -311,7 +311,7 @@ public: bool hasTemporaryData() const { return !tmp_files.empty(); } - std::vector & getTemporaryData() { return tmp_files; } + std::vector & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; diff --git a/src/Interpreters/GraceHashJoin.cpp b/src/Interpreters/GraceHashJoin.cpp index a2010b7d94b..3fb83c3ce47 100644 --- a/src/Interpreters/GraceHashJoin.cpp +++ b/src/Interpreters/GraceHashJoin.cpp @@ -389,8 +389,8 @@ void GraceHashJoin::addBuckets(const size_t bucket_count) for (size_t i = 0; i < bucket_count; ++i) try { - TemporaryBlockStreamHolder left_file = TemporaryBlockStreamHolder(left_sample_block, tmp_data.get()); - TemporaryBlockStreamHolder right_file = TemporaryBlockStreamHolder(prepareRightBlock(right_sample_block), tmp_data.get()); + TemporaryBlockStreamHolder left_file(left_sample_block, tmp_data.get()); + TemporaryBlockStreamHolder right_file(prepareRightBlock(right_sample_block), tmp_data.get()); BucketPtr new_bucket = std::make_shared(current_size + i, std::move(left_file), std::move(right_file), log); tmp_buckets.emplace_back(std::move(new_bucket)); diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index af23b520abb..a2c9f94a6ae 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -59,7 +59,7 @@ struct NotProcessedCrossJoin : public ExtraBlock { size_t left_position; size_t right_block; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; }; @@ -513,9 +513,9 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) || (max_rows_in_join && getTotalRowCount() + block_to_save.rows() >= max_rows_in_join))) { if (!tmp_stream) - tmp_stream = TemporaryBlockStreamHolder(right_sample_block, tmp_data.get()); + tmp_stream.emplace(right_sample_block, tmp_data.get()); - tmp_stream->write(block_to_save); + tmp_stream.value()->write(block_to_save); return true; } @@ -721,13 +721,14 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) { size_t start_left_row = 0; size_t start_right_block = 0; - TemporaryBlockStreamReaderHolder reader; + std::optional reader; if (not_processed) { auto & continuation = static_cast(*not_processed); start_left_row = 
continuation.left_position; start_right_block = continuation.right_block; - reader = std::move(continuation.reader); + if (continuation.reader) + reader = std::move(*continuation.reader); not_processed.reset(); } @@ -796,9 +797,9 @@ void HashJoin::joinBlockImplCross(Block & block, ExtraBlockPtr & not_processed) if (tmp_stream && rows_added <= max_joined_block_rows) { if (!reader) - reader = tmp_stream.getReadStream(); + reader = tmp_stream->getReadStream(); - while (auto block_right = reader->read()) + while (auto block_right = reader.value()->read()) { ++block_number; process_right_block(block_right); diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 0f50e110db9..8572c5df096 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -424,7 +424,7 @@ private: /// Needed to do external cross join TemporaryDataOnDiskScopePtr tmp_data; - TemporaryBlockStreamHolder tmp_stream; + std::optional tmp_stream; mutable std::once_flag finish_writing; /// Block with columns from the right-side table. diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index f8d14b00ac5..86fa9e57e81 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -114,18 +114,19 @@ template class WrapperGuard { public: - WrapperGuard() = default; - template WrapperGuard(std::unique_ptr holder_, Args && ... args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) - {} + { + chassert(holder); + chassert(impl); + } - Impl * operator->() { return impl.get(); } - const Impl * operator->() const { return impl.get(); } - Impl & operator*() { return *impl; } - const Impl & operator*() const { return *impl; } + Impl * operator->() { chassert(impl); chassert(holder); return impl.get(); } + const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } + Impl & operator*() { chassert(impl); chassert(holder); return *impl; } + const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } operator bool() const { return impl != nullptr; } const Holder * getHolder() const { return holder.get(); } @@ -153,13 +154,13 @@ public: virtual std::unique_ptr write() = 0; virtual std::unique_ptr read(size_t buffer_size) const = 0; - /// Get location for logging purposes + /// Get location for logging virtual String describeFilePath() const = 0; virtual ~TemporaryFileHolder() = default; }; - +/// Reads raw data from temporary file class TemporaryDataReadBuffer : public ReadBuffer { public: @@ -173,7 +174,7 @@ private: WrapperGuard compressed_buf; }; -/// Writes data to buffer provided by file_holder, and accounts amount of written data in parent scope. +/// Writes raw data to buffer provided by file_holder, and accounts amount of written data in parent scope. class TemporaryDataBuffer : public WriteBuffer { public: @@ -206,13 +207,13 @@ private: Stat stat; }; + +/// High level interfaces for reading and writing temporary data by blocks. 
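A minimal standalone sketch of the holder-plus-impl guard pattern that the comment above refers to; the class and member names here (Guard, FileHolder, BlockWriter) are illustrative stand-ins, not the actual ClickHouse declarations:

#include <cassert>
#include <iostream>
#include <memory>
#include <string>
#include <utility>

template <typename Impl, typename Holder>
class Guard
{
public:
    template <typename... Args>
    explicit Guard(std::unique_ptr<Holder> holder_, Args &&... args)
        : holder(std::move(holder_))
        , impl(std::make_unique<Impl>(*holder, std::forward<Args>(args)...))
    {
        assert(holder && impl);   /// both parts must exist for the guard to be usable
    }

    Impl * operator->() { assert(impl); return impl.get(); }
    const Holder * getHolder() const { return holder.get(); }

private:
    std::unique_ptr<Holder> holder;   /// owns the underlying resource (e.g. a temporary file)
    std::unique_ptr<Impl> impl;       /// higher-level interface built on top of the holder
};

struct FileHolder
{
    std::string path;
};

struct BlockWriter
{
    explicit BlockWriter(FileHolder & file_) : file(file_) {}
    void write(const std::string & data) { std::cout << "write '" << data << "' to " << file.path << '\n'; }
    FileHolder & file;
};

int main()
{
    Guard<BlockWriter, FileHolder> writer(std::make_unique<FileHolder>(FileHolder{"tmp/abc"}));
    writer->write("block");   /// the impl is always reached through the guard, as in the real holder classes
}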
using TemporaryBlockStreamReaderHolder = WrapperGuard; class TemporaryBlockStreamHolder : public WrapperGuard { public: - TemporaryBlockStreamHolder() = default; - TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); TemporaryBlockStreamReaderHolder getReadStream() const; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ba157dabffb..d3299ea651f 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -69,7 +69,7 @@ public: ReadableSize(static_cast(stat.compressed_size)), ReadableSize(static_cast(stat.uncompressed_size))); } - Block block = tmp_read_stream->read(); + Block block = tmp_read_stream.value()->read(); if (!block) return {}; @@ -79,7 +79,7 @@ public: private: TemporaryBlockStreamHolder tmp_stream; - TemporaryBlockStreamReaderHolder tmp_read_stream; + std::optional tmp_read_stream; LoggerPtr log; }; diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 5c9d4ea61a2..1009458574e 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -964,7 +964,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource>(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); + builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } From 017d9557c5c6d41c671d55c21fb2e8810d231dd3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Thu, 17 Oct 2024 15:33:33 +0000 Subject: [PATCH 07/74] f --- src/Interpreters/Aggregator.cpp | 12 +++++++++++- src/Interpreters/Aggregator.h | 7 ++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index cdc819d3a32..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1519,7 +1519,10 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); + std::unique_lock lk(tmp_files_mutex); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); + lk.unlock(); + ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); LOG_DEBUG(log, "Writing part of aggregation data into temporary file {}", out_stream.getHolder()->describeFilePath()); @@ -1639,11 +1642,18 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } -std::vector & Aggregator::getTemporaryData() +std::list & Aggregator::getTemporaryData() { return tmp_files; } +bool Aggregator::hasTemporaryData() const +{ + std::lock_guard lk(tmp_files_mutex); + return !tmp_files.empty(); +} + + template void Aggregator::writeToTemporaryFileImpl( AggregatedDataVariants & data_variants, diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 3ac5ca30ed4..451583946eb 100644 --- a/src/Interpreters/Aggregator.h +++ 
b/src/Interpreters/Aggregator.h @@ -309,9 +309,9 @@ public: /// For external aggregation. void writeToTemporaryFile(AggregatedDataVariants & data_variants, size_t max_temp_file_size = 0) const; - bool hasTemporaryData() const { return !tmp_files.empty(); } + bool hasTemporaryData() const; - std::vector & getTemporaryData(); + std::list & getTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; @@ -356,7 +356,8 @@ private: /// For external aggregation. TemporaryDataOnDiskScopePtr tmp_data; - mutable std::vector tmp_files; + mutable std::mutex tmp_files_mutex; + mutable std::list tmp_files; size_t min_bytes_for_prefetch = 0; From a5b9083f2c2f03345f1b14630d9bae8c25996697 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 18 Oct 2024 14:40:47 +0000 Subject: [PATCH 08/74] f --- src/Interpreters/TemporaryDataOnDisk.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 6cc49fe83c8..c0c9d0a80c5 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -9,13 +9,14 @@ #include #include #include +#include #include #include #include #include #include -#include "Common/Exception.h" +#include namespace ProfileEvents { @@ -130,7 +131,7 @@ public: ~TemporaryFileOnLocalDisk() override try { - if (disk->exists(path_to_file)) + if (disk->existsFile(path_to_file)) { LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Removing temporary file '{}'", path_to_file); disk->removeRecursive(path_to_file); From e8fdacdeced2a1bab0600524db90fc6cb29aaaf2 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:50:16 +0000 Subject: [PATCH 09/74] fix --- src/Interpreters/Cache/Metadata.cpp | 1 + src/Interpreters/TemporaryDataOnDisk.cpp | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 2ee985b1c31..6a2cca33a13 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,6 +470,7 @@ private: void CacheMetadata::cleanupThreadFunc() { + LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index c0c9d0a80c5..ea29afbe1fa 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -59,7 +59,7 @@ public: explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) { const auto key = FileSegment::Key::random(); - LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file in cache with key {}", key); + LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( key, 0, std::max(10_MiB, max_file_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); @@ -270,6 +270,9 @@ std::unique_ptr TemporaryDataBuffer::read() { finishWriting(); + if (stat.compressed_size == 0 && stat.uncompressed_size == 0) + return std::make_unique(std::make_unique()); + /// Keep buffer size less that file size, to avoid memory overhead for large amounts of small files size_t buffer_size = std::min(stat.compressed_size, DBMS_DEFAULT_BUFFER_SIZE); return std::make_unique(file_holder->read(buffer_size)); From 881f1a94ae72433a1b1c49ee76877a3af66b1527 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:52:18 +0000 Subject: 
[PATCH 10/74] fix tidy --- src/Interpreters/TemporaryDataOnDisk.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index 86fa9e57e81..eab3571dd07 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -115,7 +115,7 @@ class WrapperGuard { public: template - WrapperGuard(std::unique_ptr holder_, Args && ... args) + explicit WrapperGuard(std::unique_ptr holder_, Args && ... args) : holder(std::move(holder_)) , impl(std::make_unique(*holder, std::forward(args)...)) { @@ -127,7 +127,7 @@ public: const Impl * operator->() const { chassert(impl); chassert(holder); return impl.get(); } Impl & operator*() { chassert(impl); chassert(holder); return *impl; } const Impl & operator*() const { chassert(impl); chassert(holder); return *impl; } - operator bool() const { return impl != nullptr; } + operator bool() const { return impl != nullptr; } /// NOLINT const Holder * getHolder() const { return holder.get(); } Holder * getHolder() { return holder.get(); } From 0e702fc56d55900aaf9ce18696f18cd4855d9d17 Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 21 Oct 2024 16:57:32 +0000 Subject: [PATCH 11/74] upd tests/integration/test_tmp_policy/test.py --- tests/integration/test_tmp_policy/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_tmp_policy/test.py b/tests/integration/test_tmp_policy/test.py index 870a70b127a..097f93660b2 100644 --- a/tests/integration/test_tmp_policy/test.py +++ b/tests/integration/test_tmp_policy/test.py @@ -35,8 +35,8 @@ def test_disk_selection(start_cluster): node.query(query, settings=settings) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk1/" + "Writing part of aggregation data into temporary file.*/disk1/" ) assert node.contains_in_log( - "Writing part of aggregation data into temporary file /disk2/" + "Writing part of aggregation data into temporary file.*/disk2/" ) From eccf5dd15e91663adb0c54e045f6c87e789656b3 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 22 Oct 2024 13:19:42 +0000 Subject: [PATCH 12/74] better error message --- src/Interpreters/TemporaryDataOnDisk.cpp | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index ea29afbe1fa..60bfd379a72 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include @@ -98,7 +100,24 @@ public: { auto reservation = volume->reserve(max_file_size); if (!reservation) - throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk"); + { + auto disks = volume->getDisks(); + Strings disks_info; + for (const auto & d : disks) + { + auto to_double = [](auto x) { return static_cast(x); }; + disks_info.push_back(fmt::format("{}: available: {} unreserved: {}, total: {}, keeping: {}", + d->getName(), + ReadableSize(d->getAvailableSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getUnreservedSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getTotalSpace().transform(to_double).value_or(NaNOrZero())), + ReadableSize(d->getKeepingFreeSpace()))); + } + + throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, + "Not enough space on temporary disk, cannot reserve {} bytes on [{}]", + max_file_size, fmt::join(disks_info, ", ")); 
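The error path above collects per-disk numbers that may be absent; a small sketch of that summary-building step, with made-up disk names and sizes and a plain loop standing in for fmt::join:

#include <cmath>
#include <cstdint>
#include <iostream>
#include <optional>
#include <sstream>
#include <string>
#include <vector>

struct DiskInfo
{
    std::string name;
    std::optional<std::uint64_t> available;   /// may be unknown for some disk types
    std::optional<std::uint64_t> total;
};

/// Missing values are rendered as NaN instead of aborting the error message.
static double orNaN(const std::optional<std::uint64_t> & value)
{
    return value ? static_cast<double>(*value) : std::nan("");
}

int main()
{
    std::vector<DiskInfo> disks{{"disk1", 1000000, 10000000}, {"disk2", std::nullopt, 5000000}};

    std::ostringstream joined;
    for (size_t i = 0; i < disks.size(); ++i)
    {
        if (i)
            joined << ", ";
        joined << disks[i].name << ": available: " << orNaN(disks[i].available)
               << ", total: " << orNaN(disks[i].total);
    }

    std::cout << "Not enough space on temporary disk, cannot reserve 42 bytes on [" << joined.str() << "]\n";
}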
+ } disk = reservation->getDisk(); } else From 349af95cd1e2c998391bec9710bacb0458175835 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 10:45:32 +0000 Subject: [PATCH 13/74] fix data race --- src/Interpreters/Cache/Metadata.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 6a2cca33a13..2ee985b1c31 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -470,7 +470,6 @@ private: void CacheMetadata::cleanupThreadFunc() { - LOG_DEBUG(log, "Cleanup thread started"); while (true) { Key key; From 084f878fb19995763e3db825752dc61c9d768b43 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:10:33 +0000 Subject: [PATCH 14/74] log --- src/Interpreters/Aggregator.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index bb9e22e5a1b..2dd6513d498 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1504,6 +1504,7 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1520,6 +1521,7 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2932,6 +2934,7 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; + LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } From 54b93953847699f1f9d14939bd1e0067d933dbba Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 25 Oct 2024 11:11:19 +0000 Subject: [PATCH 15/74] fix typo --- src/Storages/MergeTree/MergeTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 1009458574e..d781cef9f17 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -1614,7 +1614,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources temporaty file is used only for vertical merge + (is_vertical_merge ? 
ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, From c952d9d8153ce59458fdb69a208b361c7454cab1 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 25 Oct 2024 20:55:51 +0000 Subject: [PATCH 16/74] Right JOIN with parallel replicas --- .../ClusterProxy/executeQuery.cpp | 4 +- src/Planner/PlannerJoinTree.cpp | 2 + src/Planner/findParallelReplicasQuery.cpp | 52 ++++++++++++---- src/Storages/buildQueryTreeForShard.cpp | 62 +++++++++++++------ 4 files changed, 86 insertions(+), 34 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index e88fdeb0379..4b1f3094be3 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas( QueryPlanStepPtr analyzed_read_from_merge_tree) { auto logger = getLogger("executeQueryWithParallelReplicas"); - LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas", - storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage); + LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}", + storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString()); const auto & settings = context->getSettingsRef(); diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..7889a358d95 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,6 +665,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree()); + const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index b97a9a36381..891e5034f44 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -100,14 +100,19 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre auto join_kind = join_node.getKind(); auto join_strictness = join_node.getStrictness(); - bool can_parallelize_join = - join_kind == JoinKind::Left - || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All); - - if (!can_parallelize_join) + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + } + else + { return {}; + } - query_tree_node = join_node.getLeftTableExpression().get(); break; } default: @@ -310,13 +315,15 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - std::stack right_join_nodes; - while (query_tree_node || !right_join_nodes.empty()) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + 
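The same side-selection rule appears in getSupportingParallelReplicasQuery() above and in the table traversal that follows; a compact sketch of that rule, using simplified stand-in enums rather than the ClickHouse types:

#include <iostream>
#include <optional>

enum class JoinKind { Left, Right, Inner, Full };
enum class JoinStrictness { All, Any, Asof };
enum class Side { Left, Right };

/// LEFT JOIN and INNER ALL JOIN keep the left subtree as the parallel-replicas candidate,
/// RIGHT JOIN switches to the right subtree, anything else gives up on parallel replicas.
std::optional<Side> sideForParallelReplicas(JoinKind kind, JoinStrictness strictness)
{
    if (kind == JoinKind::Left || (kind == JoinKind::Inner && strictness == JoinStrictness::All))
        return Side::Left;
    if (kind == JoinKind::Right)
        return Side::Right;
    return std::nullopt;
}

int main()
{
    auto side = sideForParallelReplicas(JoinKind::Right, JoinStrictness::All);
    std::cout << (side ? (*side == Side::Left ? "left" : "right") : "none") << '\n';
}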
+ std::stack join_nodes; + while (query_tree_node || !join_nodes.empty()) { if (!query_tree_node) { - query_tree_node = right_join_nodes.top(); - right_join_nodes.pop(); + query_tree_node = join_nodes.top(); + join_nodes.pop(); } auto join_tree_node_type = query_tree_node->getNodeType(); @@ -365,8 +372,23 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as(); - query_tree_node = join_node.getLeftTableExpression().get(); - right_join_nodes.push(join_node.getRightTableExpression().get()); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); + + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner and join_strictness == JoinStrictness::All)) + { + query_tree_node = join_node.getLeftTableExpression().get(); + join_nodes.push(join_node.getRightTableExpression().get()); + } + else if (join_kind == JoinKind::Right) + { + query_tree_node = join_node.getRightTableExpression().get(); + join_nodes.push(join_node.getLeftTableExpression().get()); + } + else + { + return nullptr; + } break; } default: @@ -400,7 +422,9 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - return findTableForParallelReplicas(query_tree_node.get()); + const auto * res = findTableForParallelReplicas(query_tree_node.get()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + return res; } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -408,6 +432,8 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr storage_limits) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); + auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index bbf32c68d19..df9bfd049fb 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -314,6 +314,35 @@ TableNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node, return temporary_table_expression_node; } +QueryTreeNodePtr getSubqueryFromTableExpression( + const QueryTreeNodePtr & join_table_expression, + const std::unordered_map & column_source_to_columns, + const ContextPtr & context) +{ + auto join_table_expression_node_type = join_table_expression->getNodeType(); + QueryTreeNodePtr subquery_node; + + if (join_table_expression_node_type == QueryTreeNodeType::QUERY || join_table_expression_node_type == QueryTreeNodeType::UNION) + { + subquery_node = join_table_expression; + } + else if ( + join_table_expression_node_type == QueryTreeNodeType::TABLE || join_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + { + const auto & columns = column_source_to_columns.at(join_table_expression).columns; + subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, join_table_expression, context); + } + else + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected JOIN right table expression to be table, table function, query or union node. 
Actual {}", + join_table_expression->formatASTForErrorMessage()); + } + + return subquery_node; +} + } QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_context, QueryTreeNodePtr query_tree_to_modify) @@ -335,37 +364,32 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { if (auto * join_node = global_in_or_join_node.query_node->as()) { - auto join_right_table_expression = join_node->getRightTableExpression(); - auto join_right_table_expression_node_type = join_right_table_expression->getNodeType(); - - QueryTreeNodePtr subquery_node; - - if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY || - join_right_table_expression_node_type == QueryTreeNodeType::UNION) + QueryTreeNodePtr join_table_expression; + const auto join_kind = join_node->getKind(); + const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) { - subquery_node = join_right_table_expression; + join_table_expression = join_node->getRightTableExpression(); } - else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE || - join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION) + else if (join_kind == JoinKind::Right) { - const auto & columns = column_source_to_columns.at(join_right_table_expression).columns; - subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns, - join_right_table_expression, - planner_context->getQueryContext()); + join_table_expression = join_node->getLeftTableExpression(); } else { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Expected JOIN right table expression to be table, table function, query or union node. Actual {}", - join_right_table_expression->formatASTForErrorMessage()); + throw Exception( + ErrorCodes::LOGICAL_ERROR, "Unexpected join kind: {}", join_kind); } + auto subquery_node + = getSubqueryFromTableExpression(join_table_expression, column_source_to_columns, planner_context->getQueryContext()); + auto temporary_table_expression_node = executeSubqueryNode(subquery_node, planner_context->getMutableQueryContext(), global_in_or_join_node.subquery_depth); - temporary_table_expression_node->setAlias(join_right_table_expression->getAlias()); + temporary_table_expression_node->setAlias(join_table_expression->getAlias()); - replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node)); + replacement_map.emplace(join_table_expression.get(), std::move(temporary_table_expression_node)); continue; } if (auto * in_function_node = global_in_or_join_node.query_node->as()) From aeffae571c91909a7196bec6e952026932c43cc6 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Sat, 26 Oct 2024 19:35:33 +0200 Subject: [PATCH 17/74] CI: Functional Tests with praktika --- .github/workflows/pr.yaml | 287 ++++++++++++++++++++ .github/workflows/pull_request.yml | 212 --------------- ci/jobs/build_clickhouse.py | 12 +- ci/jobs/check_style.py | 2 +- ci/jobs/fast_test.py | 2 +- ci/jobs/functional_stateless_tests.py | 48 ++++ ci/jobs/scripts/functional_tests_results.py | 3 +- ci/praktika/__main__.py | 23 +- ci/praktika/_environment.py | 3 +- ci/praktika/_settings.py | 3 +- ci/praktika/hook_cache.py | 6 +- ci/praktika/json.html | 50 +++- ci/praktika/result.py | 2 +- ci/praktika/runner.py | 56 ++-- ci/praktika/yaml_generator.py | 6 +- ci/settings/definitions.py | 1 + ci/settings/settings.py | 2 + ci/workflows/pull_request.py | 16 +- 18 files changed, 477 
insertions(+), 257 deletions(-) create mode 100644 .github/workflows/pr.yaml delete mode 100644 .github/workflows/pull_request.yml create mode 100644 ci/jobs/functional_stateless_tests.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml new file mode 100644 index 00000000000..34c794f6088 --- /dev/null +++ b/.github/workflows/pr.yaml @@ -0,0 +1,287 @@ +# generated by praktika + +name: PR + +on: + pull_request: + branches: ['master'] + +# Cancel the previous wf run in PRs. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + GH_TOKEN: ${{ github.token }} + +# Allow updating GH commit statuses and PR comments to post an actual job reports link +permissions: write-all + +jobs: + + config_workflow: + runs-on: [ci_services] + needs: [] + name: "Config Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + docker_builds: + runs-on: [ci_services_ebs] + needs: [config_workflow] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIEJ1aWxkcw==') }} + name: "Docker Builds" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + style_check: + runs-on: [ci_services] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgQ2hlY2s=') }} + name: "Style Check" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + fast_test: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }} + name: "Fast test" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + build_amd64_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} + name: "Build amd64 debug" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. 
+ + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd64_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} + name: "Stateless tests (amd, debug)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + finish_workflow: + runs-on: [ci_services] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + if: ${{ !cancelled() }} + name: "Finish Workflow" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml deleted file mode 100644 index e4eb44b2774..00000000000 --- a/.github/workflows/pull_request.yml +++ /dev/null @@ -1,212 +0,0 @@ -# yamllint disable rule:comments-indentation -name: PullRequestCI - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - pull_request: - types: - - synchronize - - reopened - - opened - branches: - - master - -# Cancel the previous wf run in PRs. -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - RunConfig: - runs-on: [self-hosted, style-checker-aarch64] - outputs: - data: ${{ steps.runconfig.outputs.CI_DATA }} - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true # to ensure correct digests - fetch-depth: 0 # to get a version - filter: tree:0 - - name: Debug Info - uses: ./.github/actions/debug - - name: Set pending Sync status - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status - - name: Labels check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 run_check.py - - name: Python unit tests - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - echo "Testing the main ci directory" - python3 -m unittest discover -s . -p 'test_*.py' - - name: PrepareRunConfig - id: runconfig - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json - - echo "::group::CI configuration" - python3 -m json.tool ${{ runner.temp }}/ci_run_data.json - echo "::endgroup::" - - { - echo 'CI_DATA<> "$GITHUB_OUTPUT" - - name: Re-create GH statuses for skipped jobs if any - run: | - python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses - BuildDockers: - needs: [RunConfig] - if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }} - uses: ./.github/workflows/docker_test_images.yml - with: - data: ${{ needs.RunConfig.outputs.data }} - StyleCheck: - needs: [RunConfig, BuildDockers] - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}} - uses: ./.github/workflows/reusable_test.yml - with: - test_name: Style check - runner_type: style-checker-aarch64 - run_command: | - python3 style_check.py - data: ${{ needs.RunConfig.outputs.data }} - secrets: - secret_envs: | - ROBOT_CLICKHOUSE_SSH_KEY< "$WORKFLOW_RESULT_FILE" << 'EOF' - ${{ toJson(needs) }} - EOF - python3 merge_pr.py --set-ci-status - - name: Check Workflow results - uses: ./.github/actions/check_workflow - with: - needs: ${{ toJson(needs) }} - - ################################# Stage Final ################################# - # - FinishCheck: - if: ${{ !failure() && !cancelled() }} - needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - filter: tree:0 - - name: Finish label - run: | 
- cd "$GITHUB_WORKSPACE/tests/ci" - python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} - -############################################################################################# -###################################### JEPSEN TESTS ######################################### -############################################################################################# - # This is special test NOT INCLUDED in FinishCheck - # When it's skipped, all dependent tasks will be skipped too. - # DO NOT add it there - Jepsen: - # we need concurrency as the job uses dedicated instances in the cloud - concurrency: - group: jepsen - if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }} - needs: [RunConfig, Builds_1] - uses: ./.github/workflows/reusable_test.yml - with: - test_name: ClickHouse Keeper Jepsen - runner_type: style-checker-aarch64 - data: ${{ needs.RunConfig.outputs.data }} diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 21ed8091608..cfa358b4059 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -13,8 +13,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) + parser.add_argument( + "--param", + help="Optional user-defined job start stage (for local run)", + default=None, + ) return parser.parse_args() @@ -95,7 +101,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/check_style.py b/ci/jobs/check_style.py index f9cdc76302d..d4b81abc92c 100644 --- a/ci/jobs/check_style.py +++ b/ci/jobs/check_style.py @@ -379,4 +379,4 @@ if __name__ == "__main__": ) ) - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index 1dcd65b6ed2..dc5e1c975a6 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -330,7 +330,7 @@ def main(): CH.terminate() - Result.create_from(results=results, stopwatch=stop_watch).finish_job_accordingly() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py new file mode 100644 index 00000000000..dfdd5821a19 --- /dev/null +++ b/ci/jobs/functional_stateless_tests.py @@ -0,0 +1,48 @@ +import argparse + +from praktika.result import Result +from praktika.settings import Settings +from praktika.utils import MetaClasses, Shell, Utils + + +class JobStages(metaclass=MetaClasses.WithIter): + CHECKOUT_SUBMODULES = "checkout" + CMAKE = "cmake" + BUILD = "build" + + +def parse_args(): + parser = argparse.ArgumentParser(description="ClickHouse Build Job") + parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument("--param", help="Optional custom job start stage", default=None) + return parser.parse_args() + + +def main(): + + args = parse_args() + + stop_watch = 
Utils.Stopwatch() + + stages = list(JobStages) + stage = args.param or JobStages.CHECKOUT_SUBMODULES + if stage: + assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" + print(f"Job will start from stage [{stage}]") + while stage in stages: + stages.pop(0) + stages.insert(0, stage) + + res = True + results = [] + + if res and JobStages.CHECKOUT_SUBMODULES in stages: + info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") + results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + res = results[-1].is_ok() + + Result.create_from(results=results, stopwatch=stop_watch).complete_job() + + +if __name__ == "__main__": + main() diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index 5ac9d6b985d..aba3e4f7f5b 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -1,7 +1,6 @@ import dataclasses from typing import List -from praktika.environment import Environment from praktika.result import Result OK_SIGN = "[ OK " @@ -250,7 +249,7 @@ class FTResultsProcessor: # test_results.sort(key=test_result_comparator) return Result.create_from( - name=Environment.JOB_NAME, + name="Tests", results=test_results, status=state, files=[self.tests_output_file], diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index 7f472ecd9ae..fbb9f92909a 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,24 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--pr", + help="PR number. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run in that PR", + type=int, + default=None, + ) + run_parser.add_argument( + "--sha", + help="Commit sha. Optional parameter for local run. Set if you want an required artifact to be uploaded from CI run on that sha, head sha will be used if not set", + type=str, + default=None, + ) + run_parser.add_argument( + "--branch", + help="Commit sha. Optional parameter for local run. 
Set if you want an required artifact to be uploaded from CI run on that branch, main branch name will be used if not set", + type=str, + default=None, + ) run_parser.add_argument( "--ci", help="When not set - dummy env will be generated, for local test", @@ -85,9 +103,12 @@ if __name__ == "__main__": workflow=workflow, job=job, docker=args.docker, - dummy_env=not args.ci, + local_run=not args.ci, no_docker=args.no_docker, param=args.param, + pr=args.pr, + branch=args.branch, + sha=args.sha, ) else: parser.print_help() diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index ce9c6f5b486..4ac8ad319f9 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -159,7 +159,8 @@ class _Environment(MetaClasses.Serializable): @classmethod def get_s3_prefix_static(cls, pr_number, branch, sha, latest=False): prefix = "" - if pr_number > 0: + assert sha or latest + if pr_number and pr_number > 0: prefix += f"{pr_number}" else: prefix += f"{branch}" diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 3052d8ef877..1777257f484 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -1,5 +1,4 @@ import dataclasses -from pathlib import Path from typing import Dict, Iterable, List, Optional @@ -8,6 +7,7 @@ class _Settings: ###################################### # Pipeline generation settings # ###################################### + MAIN_BRANCH = "main" CI_PATH = "./ci" WORKFLOW_PATH_PREFIX: str = "./.github/workflows" WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" @@ -111,6 +111,7 @@ _USER_DEFINED_SETTINGS = [ "CI_DB_INSERT_TIMEOUT_SEC", "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", + "MAIN_BRANCH", ] diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index b1b5c654f20..5cfedec0144 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -8,11 +8,9 @@ from praktika.utils import Utils class CacheRunnerHooks: @classmethod - def configure(cls, _workflow): - workflow_config = RunConfig.from_fs(_workflow.name) + def configure(cls, workflow): + workflow_config = RunConfig.from_fs(workflow.name) cache = Cache() - assert _Environment.get().WORKFLOW_NAME - workflow = _get_workflows(name=_Environment.get().WORKFLOW_NAME)[0] print(f"Workflow Configure, workflow [{workflow.name}]") assert ( workflow.enable_cache diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 2f8c3e45d0b..af03ed702f8 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -89,6 +89,17 @@ letter-spacing: -0.5px; } + .dropdown-value { + width: 100px; + font-weight: normal; + font-family: inherit; + background-color: transparent; + color: inherit; + /*border: none;*/ + /*outline: none;*/ + /*cursor: pointer;*/ + } + #result-container { background-color: var(--tile-background); margin-left: calc(var(--status-width) + 20px); @@ -282,6 +293,12 @@ } } + function updateUrlParameter(paramName, paramValue) { + const url = new URL(window.location.href); + url.searchParams.set(paramName, paramValue); + window.location.href = url.toString(); + } + // Attach the toggle function to the click event of the icon document.getElementById('theme-toggle').addEventListener('click', toggleTheme); @@ -291,14 +308,14 @@ const monthNames = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; const month = monthNames[date.getMonth()]; - const year = date.getFullYear(); + //const year = date.getFullYear(); const hours = String(date.getHours()).padStart(2, '0'); const minutes = 
String(date.getMinutes()).padStart(2, '0'); const seconds = String(date.getSeconds()).padStart(2, '0'); //const milliseconds = String(date.getMilliseconds()).padStart(2, '0'); return showDate - ? `${day}-${month}-${year} ${hours}:${minutes}:${seconds}` + ? `${day}'${month} ${hours}:${minutes}:${seconds}` : `${hours}:${minutes}:${seconds}`; } @@ -346,7 +363,7 @@ return 'status-other'; } - function addKeyValueToStatus(key, value) { + function addKeyValueToStatus(key, value, options = null) { const statusContainer = document.getElementById('status-container'); @@ -357,10 +374,25 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - const valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - + let valueElement + if (value) { + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value; + } else if (options) { + valueElement = document.createElement('select'); + valueElement.className = 'dropdown-value'; + valueElement.addEventListener('change', (event) => { + const selectedValue = event.target.value; + updateUrlParameter(key, selectedValue); + }); + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue; + valueElement.appendChild(option); + }); + } keyValuePair.appendChild(keyElement) keyValuePair.appendChild(valueElement) statusContainer.appendChild(keyValuePair); @@ -487,7 +519,7 @@ const columnSymbols = { name: '📂', - status: '✔️', + status: '⏯️', start_time: '🕒', duration: '⏳', info: 'ℹ️', @@ -726,7 +758,7 @@ } else { console.error("TODO") } - addKeyValueToStatus("sha", sha); + addKeyValueToStatus("sha", null, [sha, 'lala']); if (nameParams[1]) { addKeyValueToStatus("job", nameParams[1]); } diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 3d3c986d5f9..2ba8309ad60 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -318,7 +318,7 @@ class Result(MetaClasses.Serializable): files=[log_file] if log_file else None, ) - def finish_job_accordingly(self): + def complete_job(self): self.dump() if not self.is_ok(): print("ERROR: Job Failed") diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 797a799a74d..823c7e0f36d 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -19,7 +19,7 @@ from praktika.utils import Shell, TeePopen, Utils class Runner: @staticmethod - def generate_dummy_environment(workflow, job): + def generate_local_run_environment(workflow, job, pr=None, branch=None, sha=None): print("WARNING: Generate dummy env for local test") Shell.check( f"mkdir -p {Settings.TEMP_DIR} {Settings.INPUT_DIR} {Settings.OUTPUT_DIR}" @@ -28,9 +28,9 @@ class Runner: WORKFLOW_NAME=workflow.name, JOB_NAME=job.name, REPOSITORY="", - BRANCH="", - SHA="", - PR_NUMBER=-1, + BRANCH=branch or Settings.MAIN_BRANCH if not pr else "", + SHA=sha or Shell.get_output("git rev-parse HEAD"), + PR_NUMBER=pr or -1, EVENT_TYPE="", JOB_OUTPUT_STREAM="", EVENT_FILE_PATH="", @@ -86,7 +86,7 @@ class Runner: return 0 - def _pre_run(self, workflow, job): + def _pre_run(self, workflow, job, local_run=False): env = _Environment.get() result = Result( @@ -96,9 +96,10 @@ class Runner: ) result.dump() - if workflow.enable_report and job.name != Settings.CI_CONFIG_JOB_NAME: - print("Update Job and Workflow Report") - HtmlRunnerHooks.pre_run(workflow, job) + if not local_run: + if workflow.enable_report and job.name != 
Settings.CI_CONFIG_JOB_NAME: + print("Update Job and Workflow Report") + HtmlRunnerHooks.pre_run(workflow, job) print("Download required artifacts") required_artifacts = [] @@ -133,11 +134,17 @@ class Runner: env.dump() if job.run_in_docker and not no_docker: - # TODO: add support for any image, including not from ci config (e.g. ubuntu:latest) - docker_tag = RunConfig.from_fs(workflow.name).digest_dockers[ - job.run_in_docker - ] - docker = docker or f"{job.run_in_docker}:{docker_tag}" + if ":" in job.run_in_docker: + docker_name, docker_tag = job.run_in_docker.split(":") + print( + f"WARNING: Job [{job.name}] use custom docker image with a tag - praktika won't control docker version" + ) + else: + docker_name, docker_tag = ( + job.run_in_docker, + RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], + ) + docker = docker or f"{docker_name}:{docker_tag}" cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" else: cmd = job.command @@ -285,14 +292,23 @@ class Runner: return True def run( - self, workflow, job, docker="", dummy_env=False, no_docker=False, param=None + self, + workflow, + job, + docker="", + local_run=False, + no_docker=False, + param=None, + pr=None, + sha=None, + branch=None, ): res = True setup_env_code = -10 prerun_code = -10 run_code = -10 - if res and not dummy_env: + if res and not local_run: print( f"\n\n=== Setup env script [{job.name}], workflow [{workflow.name}] ===" ) @@ -309,13 +325,15 @@ class Runner: traceback.print_exc() print(f"=== Setup env finished ===\n\n") else: - self.generate_dummy_environment(workflow, job) + self.generate_local_run_environment( + workflow, job, pr=pr, branch=branch, sha=sha + ) - if res and not dummy_env: + if res: res = False print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===") try: - prerun_code = self._pre_run(workflow, job) + prerun_code = self._pre_run(workflow, job, local_run=local_run) res = prerun_code == 0 if not res: print(f"ERROR: Pre-run failed with exit code [{prerun_code}]") @@ -339,7 +357,7 @@ class Runner: traceback.print_exc() print(f"=== Run scrip finished ===\n\n") - if not dummy_env: + if not local_run: print(f"=== Post run script [{job.name}], workflow [{workflow.name}] ===") self._post_run(workflow, job, setup_env_code, prerun_code, run_code) print(f"=== Post run scrip finished ===") diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index 00c469fec0c..fb918b4ddba 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -102,7 +102,11 @@ jobs: run: | . 
/tmp/praktika_setup_env.sh set -o pipefail - {PYTHON} -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee {RUN_LOG} + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''{JOB_NAME}''' --workflow "{WORKFLOW_NAME}" --ci |& tee /tmp/praktika/praktika_run.log + fi {UPLOADS_GITHUB}\ """ diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 176e865e6f3..c67bdee015b 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -231,3 +231,4 @@ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" BUILD_AMD_DEBUG = "Build amd64 debug" + STATELESS_TESTS = "Stateless tests (amd, debug)" diff --git a/ci/settings/settings.py b/ci/settings/settings.py index 8d5e7bc3c87..0f3b1efcee0 100644 --- a/ci/settings/settings.py +++ b/ci/settings/settings.py @@ -4,6 +4,8 @@ from ci.settings.definitions import ( RunnerLabels, ) +MAIN_BRANCH = "master" + S3_ARTIFACT_PATH = f"{S3_BUCKET_NAME}/artifacts" CI_CONFIG_RUNS_ON = [RunnerLabels.CI_SERVICES] DOCKER_BUILD_RUNS_ON = [RunnerLabels.CI_SERVICES_EBS] diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 74129177efb..c7715b40fca 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -59,6 +59,19 @@ job_build_amd_debug = Job.Config( provides=[ArtifactNames.ch_debug_binary], ) +stateless_tests_job = Job.Config( + name=JobNames.STATELESS_TESTS, + runs_on=[RunnerLabels.BUILDER], + command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", + run_in_docker="clickhouse/fasttest:latest", + digest_config=Job.CacheDigestConfig( + include_paths=[ + "./ci/jobs/functional_stateless_tests.py", + ], + ), + requires=[ArtifactNames.ch_debug_binary], +) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -67,6 +80,7 @@ workflow = Workflow.Config( style_check_job, fast_test_job, job_build_amd_debug, + stateless_tests_job, ], artifacts=[ Artifact.Config( @@ -91,4 +105,4 @@ if __name__ == "__main__": # local job test inside praktika environment from praktika.runner import Runner - Runner().run(workflow, fast_test_job, docker="fasttest", dummy_env=True) + Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) From b03a296542de52c3cb2b6f309a4bc496e4a70454 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 28 Oct 2024 23:25:38 +0000 Subject: [PATCH 18/74] Fix right join - disabling PR led to dup result --- src/Planner/PlannerJoinTree.cpp | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 7889a358d95..834e572b167 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,11 +665,15 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "table_expression:\n{}", table_expression->dumpTree()); - const auto & query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "pr_enabled={} table_expression:\n{}", + settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(), + table_expression->dumpTree()); + auto & table_expression_data =
planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; @@ -914,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_already_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + // const bool other_table_already_chosen_for_reading_with_parallel_replicas + // = planner_context->getGlobalPlannerContext()->parallel_replicas_table + // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + // if (other_table_already_chosen_for_reading_with_parallel_replicas) + // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -930,6 +934,8 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); + LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); + auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1249,6 +1255,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); + auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1921,6 +1929,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. 
Actual {}", query_plans_stack.size()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); + return std::move(query_plans_stack.back()); } From dc976c48d284fa79ad05fe234130ed3794522511 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 28 Oct 2024 23:36:57 +0000 Subject: [PATCH 19/74] Test --- .../03254_pr_join_on_dups.reference | 273 ++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 81 ++++++ 2 files changed, 354 insertions(+) create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.reference create mode 100644 tests/queries/0_stateless/03254_pr_join_on_dups.sql diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference new file mode 100644 index 00000000000..58602bafb5d --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -0,0 +1,273 @@ +inner +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +inner expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +left +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left subs +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +left expr +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +right +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +right subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +self inner +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self inner nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self inner nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self inner nullable vs not nullable 2 +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self left +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 
l9 \N 9 l9 \N +self left nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self left nullable vs not nullable 2 +1 r1 \N 0 \N +1 r2 \N 0 \N +2 r3 \N 0 \N +3 r4 \N 0 \N +3 r5 \N 0 \N +4 r6 nr6 4 r6 nr6 +6 r7 nr7 6 r7 nr7 +7 r8 nr8 7 r8 nr8 +9 r9 nr9 9 r9 nr9 +self right +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self right nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +self right nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql new file mode 100644 index 00000000000..71695c0d486 --- /dev/null +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -0,0 +1,81 @@ +drop table if exists X sync; +drop table if exists Y sync; + +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); +create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); + +insert into X (id, x_a, x_b) values (1, 'l1', 1), (2, 'l2', 2), (2, 'l3', 3), (3, 'l4', 4); +insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), (9, 'l9'); +insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); +insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); + +set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; + +select 'inner'; +select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'inner subs'; +select s.*, j.* from (select * from X) as s inner join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'inner expr'; +select X.*, Y.* from X inner join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'left'; +select X.*, Y.* from X left join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'left subs'; +select s.*, j.* 
from (select * from X) as s left join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'left expr'; +select X.*, Y.* from X left join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'right'; +select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'right expr'; +--select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'full expr'; +--select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'self inner'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self inner nullable vs not nullable 2'; +select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self left'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable'; +select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self left nullable vs not nullable 2'; +select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self right'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable'; +select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable vs not nullable'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self right nullable vs not nullable 2'; +--select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self full nullable vs not nullable 2'; 
+--select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +-- drop table X; +-- drop table Y; From e2c2e67c7b4915da6302a516826573cf1ccee701 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 10:02:24 +0000 Subject: [PATCH 20/74] Fix --- src/Planner/findParallelReplicasQuery.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 891e5034f44..58a7f48ee2b 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -423,7 +423,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; const auto * res = findTableForParallelReplicas(query_tree_node.get()); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + if (res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + else + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); return res; } From c7fce84729435f98222d0e02ba035cdd6085a0df Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 12:17:46 +0000 Subject: [PATCH 21/74] Cleanup --- src/Interpreters/ClusterProxy/executeQuery.cpp | 4 ++-- src/Planner/Planner.cpp | 2 +- src/Planner/PlannerJoinTree.cpp | 16 +++++----------- src/Planner/findParallelReplicasQuery.cpp | 4 ++-- src/Planner/findQueryForParallelReplicas.h | 4 ++-- .../03173_parallel_replicas_join_bug.sh | 3 +++ 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src/Interpreters/ClusterProxy/executeQuery.cpp b/src/Interpreters/ClusterProxy/executeQuery.cpp index 4b1f3094be3..e88fdeb0379 100644 --- a/src/Interpreters/ClusterProxy/executeQuery.cpp +++ b/src/Interpreters/ClusterProxy/executeQuery.cpp @@ -477,8 +477,8 @@ void executeQueryWithParallelReplicas( QueryPlanStepPtr analyzed_read_from_merge_tree) { auto logger = getLogger("executeQueryWithParallelReplicas"); - LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas\n{}", - storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage, StackTrace().toString()); + LOG_DEBUG(logger, "Executing read from {}, header {}, query ({}), stage {} with parallel replicas", + storage_id.getNameForLogs(), header.dumpStructure(), query_ast->formatForLogging(), processed_stage); const auto & settings = context->getSettingsRef(); diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 8d3c75fdabb..17277dfe8cd 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -274,7 +274,7 @@ FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & return res; } -FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +FiltersForTableExpressionMap collectFiltersForAnalysis(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return {}; diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 834e572b167..5c08cc27aff 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -918,11 +918,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper sending plan to replicas. 
/// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - // const bool other_table_already_chosen_for_reading_with_parallel_replicas - // = planner_context->getGlobalPlannerContext()->parallel_replicas_table - // && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - // if (other_table_already_chosen_for_reading_with_parallel_replicas) - // planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + const bool other_table_already_chosen_for_reading_with_parallel_replicas + = planner_context->getGlobalPlannerContext()->parallel_replicas_table + && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; + if (other_table_already_chosen_for_reading_with_parallel_replicas) + planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( query_plan, @@ -934,8 +934,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres max_block_size, max_streams); - LOG_DEBUG(getLogger("dumpQueryPlan"), "\n{}", dumpQueryPlan(query_plan)); - auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { if (!table->isMergeTree()) @@ -1255,8 +1253,6 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Join expression: {}", join_table_expression->dumpTree()); - auto & join_node = join_table_expression->as(); if (left_join_tree_query_plan.from_stage != QueryProcessingStage::FetchColumns) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, @@ -1929,8 +1925,6 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, "Expected 1 query plan for JOIN TREE. 
Actual {}", query_plans_stack.size()); - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "JOIN query plan:\n{}", dumpQueryPlan(query_plans_stack.back().query_plan)); - return std::move(query_plans_stack.back()); } diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 58a7f48ee2b..d92500e82fc 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -250,7 +250,7 @@ const QueryNode * findQueryForParallelReplicas( return res; } -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; @@ -404,7 +404,7 @@ static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * que return nullptr; } -const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options) +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options) { if (select_query_options.only_analyze) return nullptr; diff --git a/src/Planner/findQueryForParallelReplicas.h b/src/Planner/findQueryForParallelReplicas.h index cdce4ad0b47..83aa11c8c64 100644 --- a/src/Planner/findQueryForParallelReplicas.h +++ b/src/Planner/findQueryForParallelReplicas.h @@ -15,10 +15,10 @@ struct SelectQueryOptions; /// Find a query which can be executed with parallel replicas up to WithMergableStage. /// Returned query will always contain some (>1) subqueries, possibly with joins. -const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); /// Find a table from which we should read on follower replica. It's the left-most table within all JOINs and UNIONs. 
-const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, SelectQueryOptions & select_query_options); +const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tree_node, const SelectQueryOptions & select_query_options); struct JoinTreeQueryPlan; diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 289a49c72f4..1ee3d729cb4 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -6,12 +6,15 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_CLIENT -q " + DROP TABLE IF EXISTS ids; CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); + DROP TABLE IF EXISTS data; CREATE TABLE data (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-01', 'CREATED'); + DROP TABLE IF EXISTS data2; CREATE TABLE data2 (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); INSERT INTO data2 VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-02', 'CREATED'); " From 0fda9bf238d261269b2dd7f47c79898ceaf931cb Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 14:38:57 +0000 Subject: [PATCH 22/74] Fix 03080_incorrect_join_with_merge.sql --- src/Storages/buildQueryTreeForShard.cpp | 4 ++-- .../queries/0_stateless/03080_incorrect_join_with_merge.sql | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index df9bfd049fb..8d8af134a05 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -366,8 +366,8 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { QueryTreeNodePtr join_table_expression; const auto join_kind = join_node->getKind(); - const auto join_strictness = join_node->getStrictness(); - if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) + // const auto join_strictness = join_node->getStrictness(); + if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner /* && join_strictness == JoinStrictness::All*/)) { join_table_expression = join_node->getRightTableExpression(); } diff --git a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql index a34c71a44e2..a743c5bdffb 100644 --- a/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql +++ b/tests/queries/0_stateless/03080_incorrect_join_with_merge.sql @@ -2,6 +2,7 @@ SET enable_analyzer=1; SET distributed_foreground_insert=1; +DROP TABLE IF EXISTS first_table_lr SYNC; CREATE TABLE first_table_lr ( id String, @@ -11,6 +12,7 @@ ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03080/alter', ' ORDER BY id; +DROP TABLE IF EXISTS first_table; CREATE TABLE first_table ( id String, @@ -19,6 +21,7 @@ CREATE TABLE first_table ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'first_table_lr'); +DROP TABLE IF EXISTS second_table_lr; CREATE TABLE second_table_lr ( id String, @@ -26,6 +29,7 @@ CREATE TABLE second_table_lr ) ENGINE = MergeTree() ORDER BY id; +DROP TABLE IF EXISTS second_table; CREATE TABLE second_table ( id String, @@ 
-36,6 +40,7 @@ ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'second_table_lr INSERT INTO first_table VALUES ('1', '2'), ('3', '4'); INSERT INTO second_table VALUES ('1', '2'), ('3', '4'); +DROP TABLE IF EXISTS two_tables; CREATE TABLE two_tables ( id String, From 66f750ea6f12c08f99c7fecea700d8c7f1eaeeb7 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 29 Oct 2024 15:12:03 +0000 Subject: [PATCH 23/74] remove debug logs --- src/Interpreters/Aggregator.cpp | 3 --- src/Parsers/CreateQueryUUIDs.cpp | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index 2dd6513d498..bb9e22e5a1b 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1504,7 +1504,6 @@ bool Aggregator::executeOnBlock(Columns columns, && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } @@ -1521,7 +1520,6 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si size_t rows = data_variants.size(); std::unique_lock lk(tmp_files_mutex); - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: max_temp_file_size {}", __FILE__, __LINE__, max_temp_file_size); auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); lk.unlock(); @@ -2934,7 +2932,6 @@ bool Aggregator::mergeOnBlock(Block block, AggregatedDataVariants & result, bool && worth_convert_to_two_level) { size_t size = current_memory_usage + params.min_free_disk_space; - LOG_DEBUG(&Poco::Logger::get("XXXX"), "{}:{}: {} {}", __FILE__, __LINE__, current_memory_usage, params.min_free_disk_space); writeToTemporaryFile(result, size); } diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp index c788cc7a025..14cf5761a11 100644 --- a/src/Parsers/CreateQueryUUIDs.cpp +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -31,7 +31,7 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible. /// Thus it's not safe for example to replace /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with - /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b" + /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "248372b7-02c4-4c88-a5e1-282a83cc572a" AS SELECT a FROM b" /// This replacement is safe only for CREATE queries when inner target tables don't exist yet. 
if (!query.attach) { From bebef8d0d96e27c9823419b3a7f669d62c6a6a56 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 29 Oct 2024 23:58:39 +0000 Subject: [PATCH 24/74] Fix right joins again --- src/Planner/PlannerJoinTree.cpp | 38 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5c08cc27aff..0007dc9d158 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -665,7 +665,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres bool is_single_table_expression, bool wrap_read_columns_in_subquery) { - const auto & query_context = planner_context->getQueryContext(); + auto query_context = planner_context->getQueryContext(); const auto & settings = query_context->getSettingsRef(); LOG_DEBUG( @@ -922,17 +922,33 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres = planner_context->getGlobalPlannerContext()->parallel_replicas_table && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; if (other_table_already_chosen_for_reading_with_parallel_replicas) - planner_context->getMutableQueryContext()->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + { + chassert(query_context->canUseParallelReplicasOnFollower()); - storage->read( - query_plan, - columns_names, - storage_snapshot, - table_expression_query_info, - query_context, - from_stage, - max_block_size, - max_streams); + auto mutable_context = Context::createCopy(query_context); + mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + mutable_context, + from_stage, + max_block_size, + max_streams); + } + else + { + storage->read( + query_plan, + columns_names, + storage_snapshot, + table_expression_query_info, + query_context, + from_stage, + max_block_size, + max_streams); + } auto parallel_replicas_enabled_for_storage = [](const StoragePtr & table, const Settings & query_settings) { From 6004cb8ff4fc0b751f9cd0821a4d9214cfd63e3e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 11:08:21 +0000 Subject: [PATCH 25/74] Remove current_table_chosen_for_reading_with_parallel_replicas flag --- src/Planner/PlannerJoinTree.cpp | 8 +++----- src/Storages/SelectQueryInfo.h | 2 -- src/Storages/StorageMergeTree.cpp | 4 +--- src/Storages/StorageReplicatedMergeTree.cpp | 5 +---- 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 0007dc9d158..5e29c1a6a81 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -702,8 +702,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres table_expression_query_info.table_expression = table_expression; if (const auto & filter_actions = table_expression_data.getFilterActions()) table_expression_query_info.filter_actions_dag = std::make_shared(filter_actions->clone()); - table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas - = table_node == planner_context->getGlobalPlannerContext()->parallel_replicas_table; size_t max_streams = settings[Setting::max_threads]; size_t max_threads_execute_query = settings[Setting::max_threads]; @@ -918,10 +916,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It 
is just a safety check needed until we have a proper sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_already_chosen_for_reading_with_parallel_replicas + const bool other_table_chosen_for_reading_with_parallel_replicas = planner_context->getGlobalPlannerContext()->parallel_replicas_table - && !table_expression_query_info.current_table_chosen_for_reading_with_parallel_replicas; - if (other_table_already_chosen_for_reading_with_parallel_replicas) + && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; + if (other_table_chosen_for_reading_with_parallel_replicas) { chassert(query_context->canUseParallelReplicasOnFollower()); diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h index 7ad6a733c6f..f67274f227a 100644 --- a/src/Storages/SelectQueryInfo.h +++ b/src/Storages/SelectQueryInfo.h @@ -162,8 +162,6 @@ struct SelectQueryInfo /// It's guaranteed to be present in JOIN TREE of `query_tree` QueryTreeNodePtr table_expression; - bool current_table_chosen_for_reading_with_parallel_replicas = false; - /// Table expression modifiers for storage std::optional table_expression_modifiers; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 40cd6e01dba..55f79a54f2e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -276,9 +276,7 @@ void StorageMergeTree::read( } const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree] - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); + && local_context->getSettingsRef()[Setting::parallel_replicas_for_non_replicated_merge_tree]; if (auto plan = reader.read( column_names, diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index fc3245eafcf..3f1d2bc6a1c 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5640,10 +5640,7 @@ void StorageReplicatedMergeTree::readLocalImpl( const size_t max_block_size, const size_t num_streams) { - const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() - && (!local_context->getSettingsRef()[Setting::allow_experimental_analyzer] - || query_info.current_table_chosen_for_reading_with_parallel_replicas); - + const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower(); auto plan = reader.read( column_names, storage_snapshot, query_info, local_context, max_block_size, num_streams, From b724f2c33141fb0348742d6b48c4b58763450ff7 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 12:24:56 +0000 Subject: [PATCH 26/74] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 5e29c1a6a81..ac05f893cd2 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -916,13 +916,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres /// It is just a safety check needed until we have a proper 
sending plan to replicas. /// If we have a non-trivial storage like View it might create its own Planner inside read(), run findTableForParallelReplicas() /// and find some other table that might be used for reading with parallel replicas. It will lead to errors. - const bool other_table_chosen_for_reading_with_parallel_replicas - = planner_context->getGlobalPlannerContext()->parallel_replicas_table + const bool no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode + = query_context->canUseParallelReplicasOnFollower() && table_node != planner_context->getGlobalPlannerContext()->parallel_replicas_table; - if (other_table_chosen_for_reading_with_parallel_replicas) + if (no_tables_or_another_table_chosen_for_reading_with_parallel_replicas_mode) { - chassert(query_context->canUseParallelReplicasOnFollower()); - auto mutable_context = Context::createCopy(query_context); mutable_context->setSetting("allow_experimental_parallel_reading_from_replicas", Field(0)); storage->read( @@ -984,7 +982,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From 8245e3d7ef5530d55763700b0c1aeae1697dd26c Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 30 Oct 2024 21:23:29 +0000 Subject: [PATCH 27/74] Fix --- src/Planner/PlannerJoinTree.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index ac05f893cd2..481cb9b8649 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -982,10 +982,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == query_node) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); From 0808d7f0fb96e9f6c6536b9033cf2f7499cbb383 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 31 Oct 2024 12:26:46 +0000 Subject: [PATCH 28/74] Fix FULL JOINs again --- src/Planner/Planner.cpp | 3 ++- src/Planner/Planner.h | 1 + src/Planner/PlannerJoinTree.cpp | 15 ++++++++++++--- src/Planner/findParallelReplicasQuery.cpp | 8 +------- 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 17277dfe8cd..260462652fc 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,6 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) + , root_planner(true) { } @@ -1537,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if 
(planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index ae78f05cbd4..bf11c9ef9cd 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,6 +82,7 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; + bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 481cb9b8649..160d7f07d5b 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,6 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, + const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -982,7 +983,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + else if ( + ClusterProxy::canUseParallelReplicasOnInitiator(query_context) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1815,6 +1819,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { + const QueryNode & parent_query_node = query_node->as(); auto table_expressions_stack = buildTableExpressionsStack(query_node->as().getJoinTree()); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1850,7 +1855,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * Examples: Distributed, LiveView, Merge storages. */ auto left_table_expression = table_expressions_stack.front(); - auto left_table_expression_query_plan = buildQueryPlanForTableExpression(left_table_expression, + auto left_table_expression_query_plan = buildQueryPlanForTableExpression( + left_table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, @@ -1923,7 +1930,9 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, * table expression in subquery. 
*/ bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); - query_plans_stack.push_back(buildQueryPlanForTableExpression(table_expression, + query_plans_stack.push_back(buildQueryPlanForTableExpression( + table_expression, + parent_query_node, select_query_info, select_query_options, planner_context, diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index d92500e82fc..63c0ce8eb68 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -101,17 +101,11 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) - { query_tree_node = join_node.getLeftTableExpression().get(); - } else if (join_kind == JoinKind::Right) - { query_tree_node = join_node.getRightTableExpression().get(); - } else - { return {}; - } break; } @@ -275,7 +269,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - return nullptr; + return query_node; /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); From 752dfead2c5fc686b64d062b7f032196657295ff Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 13:06:29 +0000 Subject: [PATCH 29/74] Only RIGHT JOINs test --- src/Planner/PlannerJoinTree.cpp | 17 +++-- src/Planner/findParallelReplicasQuery.cpp | 11 ++- .../03254_pr_join_on_dups.reference | 72 ------------------- .../0_stateless/03254_pr_join_on_dups.sql | 28 +------- 4 files changed, 26 insertions(+), 102 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 160d7f07d5b..d79aa626d5e 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - const QueryNode & parent_query_node, + [[maybe_unused]] const QueryNode & parent_query_node, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,6 +958,14 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "parallel_replicas_node={} parent_query_node={}", + UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), + UInt64(&parent_query_node)); + + // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as(); + /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { @@ -984,9 +992,10 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres } } else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context) - && planner_context->getGlobalPlannerContext()->parallel_replicas_node - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node) + ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) + // && (!table_join_node + // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node + // && 
planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 63c0ce8eb68..8d818daa575 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,11 +265,17 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; + } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) + { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return query_node; + } /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); @@ -303,7 +309,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - + if (!res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + else + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 58602bafb5d..95cb0d8cae2 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,34 +88,6 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 -full -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 -full subs -0 \N 6 r7 nr7 -0 \N 7 r8 nr8 -1 l1 1 1 r1 \N -1 l1 1 1 r2 \N -2 l2 2 2 r3 \N -2 l3 3 2 r3 \N -3 l4 4 3 r4 \N -3 l4 4 3 r5 \N -4 l5 \N 4 r6 nr6 -4 l6 \N 4 r6 nr6 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -227,47 +199,3 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 -self full -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l2 2 2 l3 3 -2 l3 3 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 4 l5 \N -4 l5 \N 4 l6 \N -4 l6 \N 4 l5 \N -4 l6 \N 4 l6 \N -5 l7 \N 5 l7 \N -8 l8 \N 8 l8 \N -9 l9 \N 9 l9 \N -self full nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l3 3 -3 l4 4 3 l4 4 -4 l5 \N 0 \N -4 l6 \N 0 \N -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N -self full nullable vs not nullable -0 \N 4 l5 \N -0 \N 4 l6 \N -0 \N 5 l7 \N -0 \N 8 l8 \N -0 \N 9 l9 \N -1 l1 1 1 l1 1 -2 l2 2 2 l2 2 -2 l3 3 2 l2 2 -3 l4 4 2 l3 3 -4 l5 \N 3 l4 4 -4 l6 \N 3 l4 4 -5 l7 \N 0 \N -8 l8 \N 0 \N -9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 71695c0d486..22e94507c83 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), 
(7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; @@ -29,15 +29,6 @@ select 'right'; select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'right expr'; ---select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; - -select 'full'; -select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; -select 'full subs'; -select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; ---select 'full expr'; ---select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; @@ -45,7 +36,6 @@ select 'self inner nullable'; select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable vs not nullable'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self inner nullable vs not nullable 2'; select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -55,7 +45,6 @@ select 'self left nullable'; select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self left nullable vs not nullable'; select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; --- TODO: s.y_b == '' instead of NULL select 'self left nullable vs not nullable 2'; select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; @@ -65,17 +54,6 @@ select 'self right nullable'; select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self right nullable vs not nullable 2'; ---select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; -select 'self full'; -select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable'; -select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; -select 'self full nullable vs not nullable'; -select X.*, s.* from X full join (select * from X) as s on X.id = 
s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; ---select 'self full nullable vs not nullable 2'; ---select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; - --- drop table X; --- drop table Y; +drop table X sync; +drop table Y sync; From 31f761508875de1fdc678429b316e19556538eb4 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 14:52:42 +0000 Subject: [PATCH 30/74] Fix --- src/Planner/Planner.cpp | 4 ++-- src/Planner/Planner.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 260462652fc..4b5a2b903c0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,7 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - , root_planner(true) + // , root_planner(true) { } @@ -1538,7 +1538,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node && !root_planner) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index bf11c9ef9cd..8d771c343c3 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,7 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - bool root_planner = false; + // bool root_planner = false; }; } From ac0902b08820dcd64cb41ba6bd34e4957fe8eadf Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 15:57:44 +0000 Subject: [PATCH 31/74] Fix --- src/Planner/findParallelReplicasQuery.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 8d818daa575..e89f06d6cc3 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -274,7 +274,8 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr if (stack.top() == query_tree_node.get()) { LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); - return query_node; + return nullptr; + // return query_node; } /// This is needed to avoid infinite recursion. 
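
For orientation, here is a cut-down, standalone sketch of the case exercised by 03254_pr_join_on_dups.sql above: a right join over duplicated keys read with parallel replicas. The engine, the settings line and the query shape are copied from that test; the trimmed column lists, the sample rows, the "_sketch" names, and the reuse of the {database} macro and the test_cluster_one_shard_three_replicas_localhost cluster are illustrative assumptions, not part of the patch series.

drop table if exists X_sketch sync;
drop table if exists Y_sketch sync;
create table X_sketch (id Int32, x_a String) engine ReplicatedMergeTree('/clickhouse/{database}/X_sketch', '1') order by tuple();
create table Y_sketch (id Int32, y_a String) engine ReplicatedMergeTree('/clickhouse/{database}/Y_sketch', '1') order by tuple();
insert into X_sketch values (1, 'l1'), (2, 'l2'), (2, 'l3');
insert into Y_sketch values (1, 'r1'), (1, 'r2'), (2, 'r3'), (6, 'r7');

set enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan = 1;

-- Expected shape: r1 and r2 each match l1, r3 appears once per duplicated left key (l2 and l3),
-- and r7 comes back with default values on the left, as the reference file above shows for
-- unmatched right rows. The "Fix right join" commits describe duplicated rows for this query
-- shape when parallel replicas ended up disabled for only one side of the join.
select X_sketch.*, Y_sketch.* from X_sketch right join Y_sketch on X_sketch.id = Y_sketch.id order by X_sketch.id, Y_sketch.id, Y_sketch.y_a;

drop table X_sketch sync;
drop table Y_sketch sync;

The PlannerJoinTree.cpp hunks above handle this by reading the table that was not chosen for parallel replicas through a copied context with allow_experimental_parallel_reading_from_replicas set to 0, rather than flipping the setting on the shared query context.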
From 38a3c6707525fba84c190e6a7e42f791b2da5659 Mon Sep 17 00:00:00 2001 From: vdimir Date: Fri, 1 Nov 2024 18:17:08 +0000 Subject: [PATCH 32/74] revert unnecessary changes --- .../Algorithms/CollapsingSortedAlgorithm.cpp | 6 +- .../Algorithms/CollapsingSortedAlgorithm.h | 6 +- .../Algorithms/MergingSortedAlgorithm.cpp | 3 +- .../Algorithms/MergingSortedAlgorithm.h | 6 +- .../Algorithms/ReplacingSortedAlgorithm.cpp | 6 +- .../Algorithms/ReplacingSortedAlgorithm.h | 6 +- .../VersionedCollapsingAlgorithm.cpp | 6 +- .../Algorithms/VersionedCollapsingAlgorithm.h | 6 +- .../Merges/CollapsingSortedTransform.h | 2 +- .../Merges/MergingSortedTransform.cpp | 2 +- .../Merges/MergingSortedTransform.h | 2 +- .../Merges/ReplacingSortedTransform.h | 4 +- .../Merges/VersionedCollapsingTransform.h | 4 +- .../QueryPlan/BuildQueryPipelineSettings.h | 1 + src/QueryPipeline/QueryPipelineBuilder.h | 6 - src/QueryPipeline/QueryPlanResourceHolder.h | 2 - src/Storages/MergeTree/MergeTask.cpp | 129 +++++++++++++++--- src/Storages/MergeTree/MergeTask.h | 5 +- .../MergeTree/MergeTreeSelectProcessor.cpp | 5 +- 19 files changed, 134 insertions(+), 73 deletions(-) diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp index 1560e88ffef..07ee8f4ddef 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include @@ -30,18 +29,17 @@ CollapsingSortedAlgorithm::CollapsingSortedAlgorithm( size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : IMergingAlgorithmWithSharedChunks( header_, num_inputs, std::move(description_), - temp_data_buffer_.get(), + out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) , sign_column_number(header_.getPositionByName(sign_column)) , only_positive_sign(only_positive_sign_) - , temp_data_buffer(temp_data_buffer_) , log(log_) { } diff --git a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h index b7bb9914cf8..99fd95d82d9 100644 --- a/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/CollapsingSortedAlgorithm.h @@ -11,8 +11,6 @@ namespace Poco namespace DB { -class TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the primary key (the columns by which the data is sorted), * keeps no more than one row with the value of the column `sign_column = -1` ("negative row") @@ -37,7 +35,7 @@ public: size_t max_block_size_rows_, size_t max_block_size_bytes_, LoggerPtr log_, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "CollapsingSortedAlgorithm"; } @@ -64,8 +62,6 @@ private: PODArray current_row_sources; /// Sources of rows with the current primary key size_t count_incorrect_data = 0; /// To prevent too many error messages from writing to the log. 
- std::shared_ptr temp_data_buffer = nullptr; - LoggerPtr log; void reportIncorrectData(); diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index d4e4ba6aa5f..3a9cf7ee141 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -3,7 +3,6 @@ #include #include #include -#include namespace DB { @@ -16,7 +15,7 @@ MergingSortedAlgorithm::MergingSortedAlgorithm( size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) : header(std::move(header_)) , merged_data(use_average_block_sizes, max_block_size_, max_block_size_bytes_) diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index fc300e41026..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -9,8 +9,6 @@ namespace DB { -class TemporaryDataBuffer; - /// Merges several sorted inputs into one sorted output. class MergingSortedAlgorithm final : public IMergingAlgorithm { @@ -23,7 +21,7 @@ public: size_t max_block_size_bytes_, SortingQueueStrategy sorting_queue_strategy_, UInt64 limit_ = 0, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); void addInput(); @@ -47,7 +45,7 @@ private: /// Used in Vertical merge algorithm to gather non-PK/non-index columns (on next step) /// If it is not nullptr then it should be populated during execution - std::shared_ptr out_row_sources_buf = nullptr; + WriteBuffer * out_row_sources_buf = nullptr; /// Chunks currently being merged. 
Inputs current_inputs; diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp index a3a33080f52..cd347d371d9 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.cpp @@ -5,7 +5,6 @@ #include #include #include -#include namespace DB { @@ -38,13 +37,12 @@ ReplacingSortedAlgorithm::ReplacingSortedAlgorithm( const String & version_column, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool cleanup_, bool enable_vertical_final_) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, max_row_refs, std::make_unique(use_average_block_sizes, max_block_size_rows, max_block_size_bytes)) , cleanup(cleanup_), enable_vertical_final(enable_vertical_final_) - , temp_data_buffer(temp_data_buffer_) { if (!is_deleted_column.empty()) is_deleted_column_number = header_.getPositionByName(is_deleted_column); diff --git a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h index d3b9837a253..2f23f2a5c4d 100644 --- a/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/ReplacingSortedAlgorithm.h @@ -24,8 +24,6 @@ struct ChunkSelectFinalIndices : public ChunkInfoCloneable temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final_ = false); @@ -61,8 +59,6 @@ private: RowRef selected_row; /// Last row with maximum version for current primary key, may extend lifetime of chunk in input source size_t max_pos = 0; /// The position (into current_row_sources) of the row with the highest version. - std::shared_ptr temp_data_buffer = nullptr; - /// Sources of rows with the current primary key. PODArray current_row_sources; diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp index 1ceb1f46234..9f124c6ba18 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.cpp @@ -1,7 +1,6 @@ #include #include #include -#include namespace DB { @@ -15,13 +14,12 @@ VersionedCollapsingAlgorithm::VersionedCollapsingAlgorithm( const String & sign_column_, size_t max_block_size_rows_, size_t max_block_size_bytes_, - std::shared_ptr temp_data_buffer_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes) - : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), temp_data_buffer_.get(), MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) + : IMergingAlgorithmWithSharedChunks(header_, num_inputs, std::move(description_), out_row_sources_buf_, MAX_ROWS_IN_MULTIVERSION_QUEUE, std::make_unique(use_average_block_sizes, max_block_size_rows_, max_block_size_bytes_)) /// -1 for +1 in FixedSizeDequeWithGaps's internal buffer. 3 is a reasonable minimum size to collapse anything. 
, max_rows_in_queue(std::min(std::max(3, max_block_size_rows_), MAX_ROWS_IN_MULTIVERSION_QUEUE) - 1) , current_keys(max_rows_in_queue) - , temp_data_buffer(temp_data_buffer_) { sign_column_number = header_.getPositionByName(sign_column_); } diff --git a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h index 6f877459147..e6d20ddac75 100644 --- a/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/VersionedCollapsingAlgorithm.h @@ -8,8 +8,6 @@ namespace DB { -class TemporaryDataBuffer; - /** Merges several sorted inputs to one. * For each group of consecutive identical values of the sorting key * (the columns by which the data is sorted, including specially specified version column), @@ -24,7 +22,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false); const char * getName() const override { return "VersionedCollapsingAlgorithm"; } @@ -39,8 +37,6 @@ private: FixedSizeDequeWithGaps current_keys; Int8 sign_in_queue = 0; - std::shared_ptr temp_data_buffer = nullptr; - std::queue current_row_sources; /// Sources of rows with the current primary key void insertGap(size_t gap_size); diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 9b09c802783..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -23,7 +23,7 @@ public: bool only_positive_sign, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 13330dcff6d..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -20,7 +20,7 @@ MergingSortedTransform::MergingSortedTransform( SortingQueueStrategy sorting_queue_strategy, UInt64 limit_, bool always_read_till_end_, - std::shared_ptr out_row_sources_buf_, + WriteBuffer * out_row_sources_buf_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index fb8e5ce74e3..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -20,7 +20,7 @@ public: SortingQueueStrategy sorting_queue_strategy, UInt64 limit_ = 0, bool always_read_till_end_ = false, - std::shared_ptr out_row_sources_buf_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool have_all_inputs_ = true); diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index a9d9f4fb619..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -21,7 +21,7 @@ public: const String & is_deleted_column, const String & version_column, size_t 
max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false, bool cleanup = false, bool enable_vertical_final = false) @@ -34,7 +34,7 @@ public: version_column, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes, cleanup, enable_vertical_final) diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 0bdccd4795d..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -21,7 +21,7 @@ public: SortDescription description_, const String & sign_column_, size_t max_block_size_rows, size_t max_block_size_bytes, - std::shared_ptr temp_data_buffer_ = nullptr, + WriteBuffer * out_row_sources_buf_ = nullptr, bool use_average_block_sizes = false) : IMergingTransform( num_inputs, header, header, /*have_all_inputs_=*/ true, /*limit_hint_=*/ 0, /*always_read_till_end_=*/ false, @@ -31,7 +31,7 @@ public: sign_column_, max_block_size_rows, max_block_size_bytes, - temp_data_buffer_, + out_row_sources_buf_, use_average_block_sizes) { } diff --git a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h index 1c03a4d74cd..d99f9a7d1f1 100644 --- a/src/Processors/QueryPlan/BuildQueryPipelineSettings.h +++ b/src/Processors/QueryPlan/BuildQueryPipelineSettings.h @@ -20,6 +20,7 @@ struct BuildQueryPipelineSettings ExpressionActionsSettings actions_settings; QueryStatusPtr process_list_element; ProgressCallback progress_callback = nullptr; + TemporaryFileLookupPtr temporary_file_lookup; const ExpressionActionsSettings & getActionsSettings() const { return actions_settings; } static BuildQueryPipelineSettings fromContext(ContextPtr from); diff --git a/src/QueryPipeline/QueryPipelineBuilder.h b/src/QueryPipeline/QueryPipelineBuilder.h index 1e274a97a08..a9e5b1535c0 100644 --- a/src/QueryPipeline/QueryPipelineBuilder.h +++ b/src/QueryPipeline/QueryPipelineBuilder.h @@ -197,12 +197,6 @@ public: void setQueryIdHolder(std::shared_ptr query_id_holder) { resources.query_id_holders.emplace_back(std::move(query_id_holder)); } void addContext(ContextPtr context) { resources.interpreter_context.emplace_back(std::move(context)); } - template - void addResource(Resource resource, std::vector QueryPlanResourceHolder::*field) - { - (resources.*field).push_back(std::move(resource)); - } - /// Convert query pipeline to pipe. 
static Pipe getPipe(QueryPipelineBuilder pipeline, QueryPlanResourceHolder & resources); static QueryPipeline getPipeline(QueryPipelineBuilder builder); diff --git a/src/QueryPipeline/QueryPlanResourceHolder.h b/src/QueryPipeline/QueryPlanResourceHolder.h index ee2ecc25cd5..10f7f39ab09 100644 --- a/src/QueryPipeline/QueryPlanResourceHolder.h +++ b/src/QueryPipeline/QueryPlanResourceHolder.h @@ -13,7 +13,6 @@ class QueryPlan; class Context; struct QueryIdHolder; -class TemporaryDataBuffer; struct QueryPlanResourceHolder { @@ -34,7 +33,6 @@ struct QueryPlanResourceHolder std::vector storage_holders; std::vector table_locks; std::vector> query_id_holders; - std::vector> rows_sources_temporary_file; }; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 9c2bd59e7cb..e73bc18557c 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -65,6 +65,11 @@ namespace ProfileEvents extern const Event MergeProjectionStageExecuteMilliseconds; } +namespace CurrentMetrics +{ + extern const Metric TemporaryFilesForMerge; +} + namespace DB { namespace Setting @@ -124,6 +129,66 @@ static ColumnsStatistics getStatisticsForColumns( return all_statistics; } + +/// Manages the "rows_sources" temporary file that is used during vertical merge. +class RowsSourcesTemporaryFile : public ITemporaryFileLookup +{ +public: + /// A logical name of the temporary file under which it will be known to the plan steps that use it. + static constexpr auto FILE_ID = "rows_sources"; + + explicit RowsSourcesTemporaryFile(TemporaryDataOnDiskScopePtr temporary_data_on_disk_) + : temporary_data_on_disk(temporary_data_on_disk_->childScope(CurrentMetrics::TemporaryFilesForMerge)) + { + } + + WriteBuffer & getTemporaryFileForWriting(const String & name) override + { + if (name != FILE_ID) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); + + if (tmp_data_buffer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was already requested for writing, there must be only one writer"); + + tmp_data_buffer = std::make_unique(temporary_data_on_disk.get()); + return *tmp_data_buffer; + } + + std::unique_ptr getTemporaryFileForReading(const String & name) override + { + if (name != FILE_ID) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected temporary file name requested: {}", name); + + if (!finalized) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file is not finalized yet"); + + /// tmp_disk might not create a real file if no data was written to it. + if (final_size == 0) + return std::make_unique(); + + /// Reopen the file for each read so that multiple reads can be performed in parallel and there is no need to seek to the beginning.
+ return tmp_data_buffer->read(); + } + + /// Returns written data size in bytes + size_t finalizeWriting() + { + if (!tmp_data_buffer) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file was not requested for writing"); + + auto stat = tmp_data_buffer->finishWriting(); + finalized = true; + final_size = stat.uncompressed_size; + return final_size; + } + +private: + std::unique_ptr tmp_data_buffer; + TemporaryDataOnDiskScopePtr temporary_data_on_disk; + bool finalized = false; + size_t final_size = 0; +}; + static void addMissedColumnsToSerializationInfos( size_t num_rows_in_parts, const Names & part_columns, @@ -425,7 +490,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const } case MergeAlgorithm::Vertical: { - ctx->rows_sources_temporary_file = std::make_unique(global_ctx->context->getTempDataOnDisk().get()); + ctx->rows_sources_temporary_file = std::make_shared(global_ctx->context->getTempDataOnDisk()); std::map local_merged_column_to_size; for (const auto & part : global_ctx->future_part->parts) @@ -802,11 +867,24 @@ bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; + size_t sum_input_rows_exact = global_ctx->merge_list_element_ptr->rows_read; + size_t input_rows_filtered = *global_ctx->input_rows_filtered; global_ctx->merge_list_element_ptr->columns_written = global_ctx->merging_columns.size(); global_ctx->merge_list_element_ptr->progress.store(ctx->column_sizes->keyColumnsWeight(), std::memory_order_relaxed); /// Ensure data has written to disk. - ctx->rows_sources_temporary_file->finishWriting(); + size_t rows_sources_count = ctx->rows_sources_temporary_file->finalizeWriting(); + /// In special case, when there is only one source part, and no rows were skipped, we may have + /// skipped writing the rows_sources file. Otherwise rows_sources_count must be equal to the total + /// number of input rows. + /// Note that only one byte index is written for each row, so number of rows is equal to the number of bytes written. + if ((rows_sources_count > 0 || global_ctx->future_part->parts.size() > 1) && sum_input_rows_exact != rows_sources_count + input_rows_filtered) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Number of rows in source parts ({}) excluding filtered rows ({}) differs from number " + "of bytes written to rows_sources file ({}). 
It is a bug.", + sum_input_rows_exact, input_rows_filtered, rows_sources_count); + ctx->it_name_and_type = global_ctx->gathering_columns.cbegin(); @@ -838,12 +916,12 @@ class ColumnGathererStep : public ITransformingStep public: ColumnGathererStep( const Header & input_header_, - std::unique_ptr rows_sources_read_buf_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool is_result_sparse_) : ITransformingStep(input_header_, input_header_, getTraits()) - , rows_sources_read_buf(std::move(rows_sources_read_buf_)) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , is_result_sparse(is_result_sparse_) @@ -851,13 +929,15 @@ public: String getName() const override { return "ColumnGatherer"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { - const auto & header = pipeline.getHeader(); + const auto &header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); - if (!rows_sources_read_buf) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary data buffer for rows sources is not set"); + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + + auto rows_sources_read_buf = pipeline_settings.temporary_file_lookup->getTemporaryFileForReading(rows_sources_temporary_file_name); auto transform = std::make_unique( header, @@ -892,7 +972,7 @@ private: } MergeTreeData::MergingParams merging_params{}; - std::unique_ptr rows_sources_read_buf; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool is_result_sparse; @@ -943,7 +1023,7 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic const auto data_settings = global_ctx->data->getSettings(); auto merge_step = std::make_unique( merge_column_query_plan.getCurrentHeader(), - ctx->rows_sources_temporary_file->read(), + RowsSourcesTemporaryFile::FILE_ID, (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], is_result_sparse); @@ -972,9 +1052,9 @@ MergeTask::VerticalMergeRuntimeContext::PreparedColumnPipeline MergeTask::Vertic } auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_column_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); - builder->addResource(ctx->rows_sources_temporary_file, &QueryPlanResourceHolder::rows_sources_temporary_file); return {QueryPipelineBuilder::getPipeline(std::move(*builder)), std::move(indexes_to_recalc)}; } @@ -1347,7 +1427,7 @@ public: const SortDescription & sort_description_, const Names partition_key_columns_, const MergeTreeData::MergingParams & merging_params_, - std::shared_ptr rows_sources_temporary_file_, + const String & rows_sources_temporary_file_name_, UInt64 merge_block_size_rows_, UInt64 merge_block_size_bytes_, bool blocks_are_granules_size_, @@ -1357,7 +1437,7 @@ 
public: , sort_description(sort_description_) , partition_key_columns(partition_key_columns_) , merging_params(merging_params_) - , rows_sources_temporary_file(rows_sources_temporary_file_) + , rows_sources_temporary_file_name(rows_sources_temporary_file_name_) , merge_block_size_rows(merge_block_size_rows_) , merge_block_size_bytes(merge_block_size_bytes_) , blocks_are_granules_size(blocks_are_granules_size_) @@ -1367,7 +1447,7 @@ public: String getName() const override { return "MergeParts"; } - void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & /* pipeline_settings */) override + void transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & pipeline_settings) override { /// The order of the streams is important: when the key is matched, the elements go in the order of the source stream number. /// In the merged part, the lines with the same key must be in the ascending order of the identifier of original part, @@ -1377,6 +1457,14 @@ public: const auto & header = pipeline.getHeader(); const auto input_streams_count = pipeline.getNumStreams(); + WriteBuffer * rows_sources_write_buf = nullptr; + if (!rows_sources_temporary_file_name.empty()) + { + if (!pipeline_settings.temporary_file_lookup) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Temporary file lookup is not set in pipeline settings for vertical merge"); + rows_sources_write_buf = &pipeline_settings.temporary_file_lookup->getTemporaryFileForWriting(rows_sources_temporary_file_name); + } + switch (merging_params.mode) { case MergeTreeData::MergingParams::Ordinary: @@ -1389,14 +1477,14 @@ public: SortingQueueStrategy::Default, /* limit_= */0, /* always_read_till_end_= */false, - rows_sources_temporary_file, + rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Collapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, false, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; case MergeTreeData::MergingParams::Summing: @@ -1411,7 +1499,7 @@ public: case MergeTreeData::MergingParams::Replacing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.is_deleted_column, merging_params.version_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size, + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size, cleanup); break; @@ -1424,7 +1512,7 @@ public: case MergeTreeData::MergingParams::VersionedCollapsing: merged_transform = std::make_shared( header, input_streams_count, sort_description, merging_params.sign_column, - merge_block_size_rows, merge_block_size_bytes, rows_sources_temporary_file, blocks_are_granules_size); + merge_block_size_rows, merge_block_size_bytes, rows_sources_write_buf, blocks_are_granules_size); break; } @@ -1466,7 +1554,7 @@ private: const SortDescription sort_description; const Names partition_key_columns; const MergeTreeData::MergingParams merging_params{}; - std::shared_ptr rows_sources_temporary_file; + const String rows_sources_temporary_file_name; const UInt64 merge_block_size_rows; const UInt64 merge_block_size_bytes; const bool blocks_are_granules_size; @@ -1635,7 +1723,7 @@ void 
MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const sort_description, partition_key_columns, global_ctx->merging_params, - (is_vertical_merge ? ctx->rows_sources_temporary_file : nullptr), /// rows_sources' temporary file is used only for vertical merge + (is_vertical_merge ? RowsSourcesTemporaryFile::FILE_ID : ""), /// rows_sources' temporary file is used only for vertical merge (*data_settings)[MergeTreeSetting::merge_max_block_size], (*data_settings)[MergeTreeSetting::merge_max_block_size_bytes], ctx->blocks_are_granules_size, @@ -1700,6 +1788,7 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() const { auto pipeline_settings = BuildQueryPipelineSettings::fromContext(global_ctx->context); + pipeline_settings.temporary_file_lookup = ctx->rows_sources_temporary_file; auto optimization_settings = QueryPlanOptimizationSettings::fromContext(global_ctx->context); auto builder = merge_parts_query_plan.buildQueryPipeline(optimization_settings, pipeline_settings); diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index a6969e3aa48..53792165987 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -42,6 +42,7 @@ namespace DB class MergeTask; using MergeTaskPtr = std::shared_ptr; +class RowsSourcesTemporaryFile; /** * Overview of the merge algorithm @@ -243,7 +244,7 @@ private: bool force_ttl{false}; CompressionCodecPtr compression_codec{nullptr}; size_t sum_input_rows_upper_bound{0}; - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes{}; /// For projections to rebuild @@ -322,7 +323,7 @@ private: struct VerticalMergeRuntimeContext : public IStageRuntimeContext { /// Begin dependencies from previous stage - std::shared_ptr rows_sources_temporary_file; + std::shared_ptr rows_sources_temporary_file; std::optional column_sizes; CompressionCodecPtr compression_codec; std::list::const_iterator it_name_and_type; diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index 5e9674fb5d6..5efd33ce09a 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -111,11 +111,10 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor( } if (!prewhere_actions.steps.empty()) - LOG_TRACE(log, "PREWHERE condition was split into {} steps", prewhere_actions.steps.size()); + LOG_TRACE(log, "PREWHERE condition was split into {} steps: {}", prewhere_actions.steps.size(), prewhere_actions.dumpConditions()); if (prewhere_info) - LOG_TEST(log, "Original PREWHERE DAG:{}\n{}\nPREWHERE actions:\n{}", - prewhere_actions.dumpConditions(), + LOG_TEST(log, "Original PREWHERE DAG:\n{}\nPREWHERE actions:\n{}", prewhere_info->prewhere_actions.dumpDAG(), (!prewhere_actions.steps.empty() ? 
prewhere_actions.dump() : std::string(""))); } From 6d5c707d2cfc029528ba1a32ceb4cd313e198147 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 21:32:07 +0000 Subject: [PATCH 33/74] Cleanup --- src/Planner/findParallelReplicasQuery.cpp | 14 +++++++------- .../02771_parallel_replicas_analyzer.sql | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index e89f06d6cc3..5db67d7c793 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -97,8 +97,8 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre case QueryTreeNodeType::JOIN: { const auto & join_node = query_tree_node->as(); - auto join_kind = join_node.getKind(); - auto join_strictness = join_node.getStrictness(); + const auto join_kind = join_node.getKind(); + const auto join_strictness = join_node.getStrictness(); if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All)) query_tree_node = join_node.getLeftTableExpression().get(); @@ -266,7 +266,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; } @@ -310,10 +310,10 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - if (!res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); + // if (!res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } diff --git a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql index 081077ba460..a2d26a8fc78 100644 --- a/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql +++ b/tests/queries/0_stateless/02771_parallel_replicas_analyzer.sql @@ -1,5 +1,5 @@ -- Tags: zookeeper -DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated; +DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated SYNC; CREATE TABLE join_inner_table__fuzz_146_replicated ( `id` UUID, @@ -52,4 +52,4 @@ WHERE GROUP BY is_initial_query, query ORDER BY is_initial_query DESC, c, query; -DROP TABLE join_inner_table__fuzz_146_replicated; +DROP TABLE join_inner_table__fuzz_146_replicated SYNC; From 1e3f08ab3e48d666cd5e3b02cfecf50915738377 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 19:44:03 +0000 Subject: [PATCH 34/74] Only with analyzer --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 22e94507c83..5f2f209d0b0 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -9,7 +9,7 @@ insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); -set 
enable_parallel_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan=1; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 2530fd233f3c4d81ff7ad6f18ec0e3a73320c8d0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 21:36:02 +0000 Subject: [PATCH 35/74] Added 03261_pr_semi_anti_join --- .../03261_pr_semi_anti_join.reference | 16 +++++++++++ .../0_stateless/03261_pr_semi_anti_join.sql | 27 +++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.reference create mode 100644 tests/queries/0_stateless/03261_pr_semi_anti_join.sql diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.reference b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference new file mode 100644 index 00000000000..782147f1f6f --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.reference @@ -0,0 +1,16 @@ +semi left +2 a3 2 b1 +2 a6 2 b1 +4 a5 4 b3 +semi right +2 a3 2 b1 +2 a3 2 b2 +4 a5 4 b3 +4 a5 4 b4 +4 a5 4 b5 +anti left +0 a1 0 +1 a2 1 +3 a4 3 +anti right +0 5 b6 diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql new file mode 100644 index 00000000000..d2ea3725d6b --- /dev/null +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); + +CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); +CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; From 935a29485c60038b14e4e8c87c8e021fc05f7928 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 14:32:54 +0000 Subject: [PATCH 36/74] Fix logs --- src/Planner/findParallelReplicasQuery.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 5db67d7c793..314a7f06137 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -273,7 +273,7 @@ 
const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; // return query_node; } @@ -427,10 +427,10 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; const auto * res = findTableForParallelReplicas(query_tree_node.get()); - if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - else - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); + // if (res) + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); + // else + // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); return res; } From a6b55563c73ff10b42569d17ee24457ffff91e4e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 18:32:05 +0000 Subject: [PATCH 37/74] Fix FULL joins --- src/Planner/PlannerJoinTree.cpp | 37 +++++----- .../03254_pr_join_on_dups.reference | 72 +++++++++++++++++++ .../0_stateless/03254_pr_join_on_dups.sql | 12 ++++ 3 files changed, 103 insertions(+), 18 deletions(-) diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index d79aa626d5e..c2acbd661c8 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -659,7 +659,7 @@ std::unique_ptr createComputeAliasColumnsStep( } JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression, - [[maybe_unused]] const QueryNode & parent_query_node, + const QueryTreeNodePtr & parent_join_tree, const SelectQueryInfo & select_query_info, const SelectQueryOptions & select_query_options, PlannerContextPtr & planner_context, @@ -958,17 +958,22 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres return true; }; - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "parallel_replicas_node={} parent_query_node={}", - UInt64(planner_context->getGlobalPlannerContext()->parallel_replicas_node), - UInt64(&parent_query_node)); - - // const JoinNode * table_join_node = parent_query_node.getJoinTree()->as(); - /// query_plan can be empty if there is nothing to read if (query_plan.isInitialized() && parallel_replicas_enabled_for_storage(storage, settings)) { + const bool allow_parallel_replicas_for_table_expression = [](const QueryTreeNodePtr & join_tree_node) + { + const JoinNode * join_node = join_tree_node->as(); + if (!join_node) + return true; + + const auto join_kind = join_node->getKind(); + if (join_kind == JoinKind::Left || join_kind == JoinKind::Right || join_kind == JoinKind::Inner) + return true; + + return false; + }(parent_join_tree); + if (query_context->canUseParallelReplicasCustomKey() && query_context->getClientInfo().distributed_depth == 0) { if (auto cluster = query_context->getClusterForParallelReplicas(); @@ -991,11 +996,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres query_plan = std::move(query_plan_parallel_replicas); } } - else if ( - ClusterProxy::canUseParallelReplicasOnInitiator(query_context)) - // && (!table_join_node - // || (table_join_node && planner_context->getGlobalPlannerContext()->parallel_replicas_node - // && 
planner_context->getGlobalPlannerContext()->parallel_replicas_node == &parent_query_node))) + else if (ClusterProxy::canUseParallelReplicasOnInitiator(query_context) && allow_parallel_replicas_for_table_expression) { // (1) find read step QueryPlan::Node * node = query_plan.getRootNode(); @@ -1828,8 +1829,8 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, const ColumnIdentifierSet & outer_scope_columns, PlannerContextPtr & planner_context) { - const QueryNode & parent_query_node = query_node->as(); - auto table_expressions_stack = buildTableExpressionsStack(query_node->as().getJoinTree()); + const QueryTreeNodePtr & join_tree_node = query_node->as().getJoinTree(); + auto table_expressions_stack = buildTableExpressionsStack(join_tree_node); size_t table_expressions_stack_size = table_expressions_stack.size(); bool is_single_table_expression = table_expressions_stack_size == 1; @@ -1866,7 +1867,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, auto left_table_expression = table_expressions_stack.front(); auto left_table_expression_query_plan = buildQueryPlanForTableExpression( left_table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, @@ -1941,7 +1942,7 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node, bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote(); query_plans_stack.push_back(buildQueryPlanForTableExpression( table_expression, - parent_query_node, + join_tree_node, select_query_info, select_query_options, planner_context, diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.reference b/tests/queries/0_stateless/03254_pr_join_on_dups.reference index 95cb0d8cae2..58602bafb5d 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.reference +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.reference @@ -88,6 +88,34 @@ right subs 4 l5 \N 4 r6 nr6 4 l6 \N 4 r6 nr6 9 l9 \N 9 r9 nr9 +full +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 +full subs +0 \N 6 r7 nr7 +0 \N 7 r8 nr8 +1 l1 1 1 r1 \N +1 l1 1 1 r2 \N +2 l2 2 2 r3 \N +2 l3 3 2 r3 \N +3 l4 4 3 r4 \N +3 l4 4 3 r5 \N +4 l5 \N 4 r6 nr6 +4 l6 \N 4 r6 nr6 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 9 r9 nr9 self inner 1 l1 1 1 l1 1 2 l2 2 2 l2 2 @@ -199,3 +227,47 @@ self right nullable vs not nullable 3 l4 4 2 l3 3 4 l5 \N 3 l4 4 4 l6 \N 3 l4 4 +self full +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l2 2 2 l3 3 +2 l3 3 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 4 l5 \N +4 l5 \N 4 l6 \N +4 l6 \N 4 l5 \N +4 l6 \N 4 l6 \N +5 l7 \N 5 l7 \N +8 l8 \N 8 l8 \N +9 l9 \N 9 l9 \N +self full nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l3 3 +3 l4 4 3 l4 4 +4 l5 \N 0 \N +4 l6 \N 0 \N +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N +self full nullable vs not nullable +0 \N 4 l5 \N +0 \N 4 l6 \N +0 \N 5 l7 \N +0 \N 8 l8 \N +0 \N 9 l9 \N +1 l1 1 1 l1 1 +2 l2 2 2 l2 2 +2 l3 3 2 l2 2 +3 l4 4 2 l3 3 +4 l5 \N 3 l4 4 +4 l6 \N 3 l4 4 +5 l7 \N 0 \N +8 l8 \N 0 \N +9 l9 \N 0 \N diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 5f2f209d0b0..222f7693090 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ 
-30,6 +30,11 @@ select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, select 'right subs'; select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; + select 'self inner'; select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; select 'self inner nullable'; @@ -55,5 +60,12 @@ select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order select 'self right nullable vs not nullable'; select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; + drop table X sync; drop table Y sync; From 8c5ab63345f385a75caa943f4b50169a13e3b470 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 4 Nov 2024 22:37:57 +0000 Subject: [PATCH 38/74] Cleanup --- src/Planner/Planner.cpp | 3 +-- src/Planner/Planner.h | 1 - src/Planner/PlannerJoinTree.cpp | 6 ------ src/Planner/findParallelReplicasQuery.cpp | 22 +--------------------- 4 files changed, 2 insertions(+), 30 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 4b5a2b903c0..17277dfe8cd 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -1263,7 +1263,6 @@ Planner::Planner(const QueryTreeNodePtr & query_tree_, findQueryForParallelReplicas(query_tree, select_query_options), findTableForParallelReplicas(query_tree, select_query_options), collectFiltersForAnalysis(query_tree, select_query_options)))) - // , root_planner(true) { } @@ -1538,7 +1537,7 @@ void Planner::buildPlanForQueryNode() JoinTreeQueryPlan join_tree_query_plan; if (planner_context->getMutableQueryContext()->canUseTaskBasedParallelReplicas() - && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node /* && !root_planner*/) + && planner_context->getGlobalPlannerContext()->parallel_replicas_node == &query_node) { join_tree_query_plan = buildQueryPlanForParallelReplicas(query_node, planner_context, select_query_info.storage_limits); } diff --git a/src/Planner/Planner.h b/src/Planner/Planner.h index 8d771c343c3..ae78f05cbd4 100644 --- a/src/Planner/Planner.h +++ b/src/Planner/Planner.h @@ -82,7 +82,6 @@ private: StorageLimitsList storage_limits; std::set used_row_policies; QueryNodeToPlanStepMapping query_node_to_plan_step_mapping; - // bool root_planner = false; }; } diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index c2acbd661c8..c1b8f999f22 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -669,12 +669,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres auto query_context = planner_context->getQueryContext(); const auto & settings = 
query_context->getSettingsRef(); - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "pr_enabled={} table_expression:\n{}", - settings[Setting::allow_experimental_parallel_reading_from_replicas].toString(), - table_expression->dumpTree()); - auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression); QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns; diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 314a7f06137..bda96f0c31f 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -265,18 +265,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found"); return nullptr; - } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - { - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query itself:\n{}", query_tree_node->dumpTree()); return nullptr; - // return query_node; - } /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); @@ -310,17 +303,11 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop(); } } - // if (!res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not found 2"); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Query:\n{}", query_tree_node->dumpTree()); return res; } static const TableNode * findTableForParallelReplicas(const IQueryTreeNode * query_tree_node) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - std::stack join_nodes; while (query_tree_node || !join_nodes.empty()) { @@ -426,12 +413,7 @@ const TableNode * findTableForParallelReplicas(const QueryTreeNodePtr & query_tr if (!context->canUseParallelReplicasOnFollower()) return nullptr; - const auto * res = findTableForParallelReplicas(query_tree_node.get()); - // if (res) - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Table found {}", res->getStorageID().getFullTableName()); - // else - // LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Not table found"); - return res; + return findTableForParallelReplicas(query_tree_node.get()); } JoinTreeQueryPlan buildQueryPlanForParallelReplicas( @@ -439,8 +421,6 @@ JoinTreeQueryPlan buildQueryPlanForParallelReplicas( const PlannerContextPtr & planner_context, std::shared_ptr storage_limits) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "\n{}", StackTrace().toString()); - auto processed_stage = QueryProcessingStage::WithMergeableState; auto context = planner_context->getQueryContext(); From e198b205092dcb0bec14b8a3a08763cc68a4a1b9 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 29 Oct 2024 21:09:03 +0000 Subject: [PATCH 39/74] CI: Stateless Tests with praktika --- .github/workflows/pr.yaml | 168 ++++++++++++++++++-- ci/__init__.py | 0 ci/docker/stateless-test/Dockerfile | 107 +++++++++++++ ci/docker/stateless-test/requirements.txt | 5 + ci/jobs/__init__.py | 0 ci/jobs/build_clickhouse.py | 65 ++++++-- ci/jobs/fast_test.py | 117 +------------- ci/jobs/functional_stateless_tests.py | 119 +++++++++++++- ci/jobs/scripts/__init__.py | 0 ci/jobs/scripts/clickhouse_proc.py | 144 +++++++++++++++++ ci/jobs/scripts/functional_tests_results.py | 3 + ci/praktika/_settings.py | 3 
+ ci/praktika/hook_html.py | 110 ++++++++++--- ci/praktika/job.py | 46 +++++- ci/praktika/json.html | 156 +++++++++++------- ci/praktika/mangle.py | 1 - ci/praktika/native_jobs.py | 5 +- ci/praktika/param.py | 8 + ci/praktika/result.py | 19 ++- ci/praktika/runner.py | 20 ++- ci/praktika/s3.py | 2 +- ci/praktika/workflow.py | 1 + ci/praktika/yaml_generator.py | 3 + ci/settings/definitions.py | 38 +++-- ci/workflows/pull_request.py | 53 ++++-- tests/clickhouse-test | 11 +- tests/config/config.d/ssl_certs.xml | 4 +- tests/config/install.sh | 24 ++- tests/docker_scripts/setup_minio.sh | 28 ++-- 29 files changed, 955 insertions(+), 305 deletions(-) create mode 100644 ci/__init__.py create mode 100644 ci/docker/stateless-test/Dockerfile create mode 100644 ci/docker/stateless-test/requirements.txt create mode 100644 ci/jobs/__init__.py create mode 100644 ci/jobs/scripts/__init__.py create mode 100644 ci/jobs/scripts/clickhouse_proc.py create mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 34c794f6088..0c3f74aeac8 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -30,6 +30,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -68,6 +71,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -106,6 +112,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -144,6 +153,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -172,16 +184,19 @@ jobs: python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - build_amd64_debug: + build_amd_debug: runs-on: [builder] needs: [config_workflow, docker_builds] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgYW1kNjQgZGVidWc=') }} - name: "Build amd64 debug" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} + name: "Build (amd_debug)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -205,21 +220,24 @@ jobs: . 
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Build amd64 debug''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug: + build_amd_release: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd64_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKQ==') }} - name: "Stateless tests (amd, debug)" + needs: [config_workflow, docker_builds] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} + name: "Build (amd_release)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | @@ -243,14 +261,137 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_1_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 1/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_parallel_2_2: + runs-on: [builder] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} + name: "Stateless tests (amd, debug) (parallel 2/2)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_debug_non_parallel: + runs-on: [style-checker] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd, debug) (non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd64_debug, stateless_tests_amd_debug] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -258,6 +399,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{ github.event.pull_reguest.head.sha }} - name: Prepare env script run: | diff --git a/ci/__init__.py b/ci/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile new file mode 100644 index 00000000000..4abd8204f1d --- /dev/null +++ b/ci/docker/stateless-test/Dockerfile @@ -0,0 +1,107 @@ +# docker build -t clickhouse/stateless-test . +FROM ubuntu:22.04 + +# ARG for quick switch to a given ubuntu mirror +ARG apt_archive="http://archive.ubuntu.com" +RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list + +ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" + +# moreutils - provides ts fo FT +# expect, bzip2 - requried by FT +# bsdmainutils - provides hexdump for FT + +# golang version 1.13 on Ubuntu 20 is enough for tests +RUN apt-get update -y \ + && env DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + awscli \ + brotli \ + lz4 \ + expect \ + moreutils \ + bzip2 \ + bsdmainutils \ + golang \ + lsof \ + mysql-client=8.0* \ + ncdu \ + netcat-openbsd \ + nodejs \ + npm \ + odbcinst \ + openjdk-11-jre-headless \ + openssl \ + postgresql-client \ + python3 \ + python3-pip \ + qemu-user-static \ + sqlite3 \ + sudo \ + tree \ + unixodbc \ + rustc \ + cargo \ + zstd \ + file \ + jq \ + pv \ + zip \ + unzip \ + p7zip-full \ + curl \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* + +ARG PROTOC_VERSION=25.1 +RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip \ + && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \ + && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip + +COPY requirements.txt / +RUN pip3 install --no-cache-dir -r /requirements.txt + +RUN mkdir -p /tmp/clickhouse-odbc-tmp \ + && cd /tmp/clickhouse-odbc-tmp \ + && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \ + && mkdir /usr/local/lib64 -p \ + && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \ + && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \ + && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \ + && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \ + && rm -rf /tmp/clickhouse-odbc-tmp + +ENV TZ=Europe/Amsterdam +RUN ln -snf 
/usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +ENV NUM_TRIES=1 + +# Unrelated to vars in setup_minio.sh, but should be the same there +# to have the same binaries for local running scenario +ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z +ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z +ARG TARGETARCH + +# Download Minio-related binaries +RUN arch=${TARGETARCH:-amd64} \ + && curl -L "https://dl.min.io/server/minio/release/linux-${arch}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -o /minio \ + && curl -L "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -o /mc \ + && chmod +x /mc /minio + +ENV MINIO_ROOT_USER="clickhouse" +ENV MINIO_ROOT_PASSWORD="clickhouse" + +# for minio to work without root +RUN chmod 777 /home +ENV HOME="/home" +ENV TEMP_DIR="/tmp/praktika" +ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" + +RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ + && tar -xvf hadoop-3.3.1.tar.gz \ + && rm -rf hadoop-3.3.1.tar.gz + + +RUN npm install -g azurite@3.30.0 \ + && npm install -g tslib && npm install -g node diff --git a/ci/docker/stateless-test/requirements.txt b/ci/docker/stateless-test/requirements.txt new file mode 100644 index 00000000000..d556d23485f --- /dev/null +++ b/ci/docker/stateless-test/requirements.txt @@ -0,0 +1,5 @@ +Jinja2==3.1.3 +numpy==1.26.4 +requests==2.32.3 +pandas==1.5.3 +scipy==1.12.0 diff --git a/ci/jobs/__init__.py b/ci/jobs/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index cfa358b4059..3db88938f23 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,5 +1,6 @@ import argparse +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -14,7 +15,9 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--build-type", + help="Type: __", + default=None, ) parser.add_argument( "--param", @@ -24,6 +27,18 @@ def parse_args(): return parser.parse_args() +CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ +-DCMAKE_BUILD_TYPE={BUILD_TYPE} \ +-DSANITIZE={SANITIZER} \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ +-DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ +{AUX_DEFS} \ +-DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 \ +-DCOMPILER_CACHE={CACHE_TYPE} \ +-DENABLE_BUILD_PROFILING=1 {DIR}""" + + def main(): args = parse_args() @@ -42,20 +57,45 @@ def main(): cmake_build_type = "Release" sanitizer = "" - if "debug" in args.BUILD_TYPE.lower(): - print("Build type set: debug") - cmake_build_type = "Debug" + if args.build_type and get_param(): + assert ( + False + ), "Build type must provided via job parameter (CI case) or via --build-type input argument not both" - if "asan" in args.BUILD_TYPE.lower(): - print("Sanitizer set: address") - sanitizer = "address" + build_type = args.build_type or get_param() + assert ( + build_type + ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" + build_type = 
build_type.lower() # if Environment.is_local_run(): # build_cache_type = "disabled" # else: - build_cache_type = "sccache" + CACHE_TYPE = "sccache" + + if "debug" in build_type: + print("Build type set: debug") + BUILD_TYPE = "Debug" + AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + elif "release" in build_type: + print("Build type set: release") + BUILD_TYPE = "None" + AUX_DEFS = " -DENABLE_TESTS=1 " + + if "asan" in build_type: + print("Sanitizer set: address") + SANITIZER = "address" + else: + SANITIZER = "" + + cmake_cmd = CMAKE_CMD.format( + BUILD_TYPE=BUILD_TYPE, + CACHE_TYPE=CACHE_TYPE, + SANITIZER=SANITIZER, + AUX_DEFS=AUX_DEFS, + DIR=Utils.cwd(), + ) - current_directory = Utils.cwd() build_dir = f"{Settings.TEMP_DIR}/build" res = True @@ -75,12 +115,7 @@ def main(): results.append( Result.create_from_command_execution( name="Cmake configuration", - command=f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE={cmake_build_type} \ - -DSANITIZE={sanitizer} -DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ - -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ - -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ - -DCMAKE_C_COMPILER=clang-18 -DCMAKE_CXX_COMPILER=clang++-18 -DCOMPILER_CACHE={build_cache_type} -DENABLE_TESTS=1 \ - -DENABLE_BUILD_PROFILING=1 {current_directory}", + command=cmake_cmd, workdir=build_dir, with_log=True, ) diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index dc5e1c975a6..cb7d925fead 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -1,120 +1,13 @@ import argparse -import threading -from pathlib import Path from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -class ClickHouseProc: - def __init__(self): - self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" - self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" - self.config_file = f"{self.ch_config_dir}/config.xml" - self.user_files_path = f"{self.ch_config_dir}/user_files" - self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" - self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" - self.proc = None - self.pid = 0 - nproc = int(Utils.cpu_count() / 2) - self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ - | tee -a \"{self.test_output_file}\"" - # TODO: store info in case of failure - self.info = "" - self.info_file = "" - - Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) - Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) - Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) - Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - - def start(self): - print("Starting ClickHouse server") - Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - 
self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=True - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - - # self.proc = Shell.run_async(self.command, verbose=True) - - started = False - try: - for _ in range(5): - pid = Shell.get_output(f"cat {self.pid_file}").strip() - if not pid: - Utils.sleep(1) - continue - started = True - print(f"Got pid from fs [{pid}]") - _ = int(pid) - break - except Exception: - pass - - if not started: - stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" - stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" - Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) - return False - - print(f"ClickHouse server started successfully, pid [{pid}]") - return True - - def wait_ready(self): - res, out, err = 0, "", "" - attempts = 30 - delay = 2 - for attempt in range(attempts): - res, out, err = Shell.get_res_stdout_stderr( - 'clickhouse-client --query "select 1"', verbose=True - ) - if out.strip() == "1": - print("Server ready") - break - else: - print(f"Server not ready, wait") - Utils.sleep(delay) - else: - Utils.print_formatted_error( - f"Server not ready after [{attempts*delay}s]", out, err - ) - return False - return True - - def run_fast_test(self): - if Path(self.test_output_file).exists(): - Path(self.test_output_file).unlink() - exit_code = Shell.run(self.fast_test_command) - return exit_code == 0 - - def terminate(self): - print("Terminate ClickHouse process") - timeout = 10 - if self.proc: - Utils.terminate_process_group(self.proc.pid) - - self.proc.terminate() - try: - self.proc.wait(timeout=10) - print(f"Process {self.proc.pid} terminated gracefully.") - except Exception: - print( - f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." 
- ) - Utils.terminate_process_group(self.proc.pid, force=True) - self.proc.wait() # Wait for the process to be fully killed - print(f"Process {self.proc} was killed.") - - def clone_submodules(): submodules_to_update = [ "contrib/sysroot", @@ -240,7 +133,7 @@ def main(): Shell.check(f"rm -rf {build_dir} && mkdir -p {build_dir}") results.append( Result.create_from_command_execution( - name="Checkout Submodules for Minimal Build", + name="Checkout Submodules", command=clone_submodules, ) ) @@ -295,8 +188,8 @@ def main(): if res and JobStages.CONFIG in stages: commands = [ f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", - f"cp {current_directory}/programs/server/config.xml {current_directory}/programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", - f"{current_directory}/tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client", + f"cp ./programs/server/config.xml ./programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --fast-test", # f"cp -a {current_directory}/programs/server/config.d/log_to_console.xml {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/", f"rm -f {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/secure_ports.xml", update_path_ch_config, @@ -310,7 +203,7 @@ def main(): ) res = results[-1].is_ok() - CH = ClickHouseProc() + CH = ClickHouseProc(fast_test=True) if res and JobStages.TEST in stages: stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index dfdd5821a19..d77522ed73a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,31 +1,78 @@ import argparse +import os +from pathlib import Path +from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc +from ci.jobs.scripts.functional_tests_results import FTResultsProcessor +from ci.settings.definitions import azure_secret + class JobStages(metaclass=MetaClasses.WithIter): - CHECKOUT_SUBMODULES = "checkout" - CMAKE = "cmake" - BUILD = "build" + INSTALL_CLICKHOUSE = "install" + START = "start" + TEST = "test" def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") - parser.add_argument("BUILD_TYPE", help="Type: ") + parser.add_argument( + "BUILD_TYPE", help="Type: __" + ) parser.add_argument("--param", help="Optional custom job start stage", default=None) return parser.parse_args() +def run_stateless_test( + no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int +): + assert not (no_parallel and no_sequiential) + test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" + aux = "" + nproc = int(Utils.cpu_count() / 2) + if batch_num and batch_total: + aux = f"--run-by-hash-total {batch_total} --run-by-hash-num {batch_num-1}" + statless_test_command = f"clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \ + --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ + {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \ + --print-time --jobs {nproc} 
--report-coverage --report-logs-stats {aux} \ + --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{test_output_file}\"" + if Path(test_output_file).exists(): + Path(test_output_file).unlink() + Shell.run(statless_test_command, verbose=True) + + def main(): args = parse_args() + params = get_param().split(" ") + parallel_or_sequential = None + no_parallel = False + no_sequential = False + if params: + parallel_or_sequential = params[0] + if len(params) > 1: + batch_num, total_batches = map(int, params[1].split("/")) + else: + batch_num, total_batches = 0, 0 + if parallel_or_sequential: + no_parallel = parallel_or_sequential == "non-parallel" + no_sequential = parallel_or_sequential == "parallel" + + os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + verbose=True, + ) stop_watch = Utils.Stopwatch() stages = list(JobStages) - stage = args.param or JobStages.CHECKOUT_SUBMODULES + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" print(f"Job will start from stage [{stage}]") @@ -36,9 +83,65 @@ def main(): res = True results = [] - if res and JobStages.CHECKOUT_SUBMODULES in stages: - info = Shell.get_output(f"ls -l {Settings.INPUT_DIR}") - results.append(Result(name="TEST", status=Result.Status.SUCCESS, info=info)) + Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + + if res and JobStages.INSTALL_CLICKHOUSE in stages: + commands = [ + f"chmod +x {Settings.INPUT_DIR}/clickhouse", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", + f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", + f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", + f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", + # update_path_ch_config, + f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"clickhouse-server --version", + ] + results.append( + Result.create_from_command_execution( + name="Install ClickHouse", command=commands, with_log=True + ) + ) + res = results[-1].is_ok() + + CH = ClickHouseProc() + if res and JobStages.START in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Start ClickHouse Server" + print(step_name) + res = res and CH.start_minio() + res = res and CH.start() + res = res and CH.wait_ready() + results.append( + Result.create_from( + name=step_name, + status=res, + stopwatch=stop_watch_, + files=( + [ + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] + if not res + else [] + ), + ) + ) + res = results[-1].is_ok() + + if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() + step_name = "Tests" + print(step_name) + run_stateless_test( + no_parallel=no_parallel, + no_sequiential=no_sequential, + batch_num=batch_num, + batch_total=total_batches, + ) + results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + 
results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/scripts/__init__.py b/ci/jobs/scripts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py new file mode 100644 index 00000000000..cc822eab693 --- /dev/null +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -0,0 +1,144 @@ +import threading +import time +from pathlib import Path + +from praktika.settings import Settings +from praktika.utils import Shell, Utils + + +class ClickHouseProc: + BACKUPS_XML = """ + + + local + {CH_RUNTIME_DIR}/var/lib/clickhouse/disks/backups/ + + +""" + + def __init__(self, fast_test=False): + self.ch_config_dir = f"{Settings.TEMP_DIR}/etc/clickhouse-server" + self.pid_file = f"{self.ch_config_dir}/clickhouse-server.pid" + self.config_file = f"{self.ch_config_dir}/config.xml" + self.user_files_path = f"{self.ch_config_dir}/user_files" + self.test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" + self.command = f"clickhouse-server --config-file {self.config_file} --pid-file {self.pid_file} -- --path {self.ch_config_dir} --user_files_path {self.user_files_path} --top_level_domains_path {self.ch_config_dir}/top_level_domains --keeper_server.storage_path {self.ch_config_dir}/coordination" + self.proc = None + self.pid = 0 + nproc = int(Utils.cpu_count() / 2) + self.fast_test_command = f"clickhouse-test --hung-check --fast-tests-only --no-random-settings --no-random-merge-tree-settings --no-long --testname --shard --zookeeper --check-zookeeper-session --order random --print-time --report-logs-stats --jobs {nproc} -- '' | ts '%Y-%m-%d %H:%M:%S' \ + | tee -a \"{self.test_output_file}\"" + # TODO: store info in case of failure + self.info = "" + self.info_file = "" + + self.minio_cmd = f"tests/docker_scripts/setup_minio.sh stateless 2>&1 > {Settings.OUTPUT_DIR}/minio.log" + + Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir) + Utils.set_env("CLICKHOUSE_CONFIG", self.config_file) + Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) + Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") + + if not fast_test: + with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + file.write(self.BACKUPS_XML) + + self.minio_proc = None + + def start_minio(self): + print("Starting minio") + + def run_minio(): + self.minio_proc = Shell.run_async( + self.minio_cmd, verbose=True, suppress_output=True + ) + + thread = threading.Thread(target=run_minio) + thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + time.sleep(5) + return thread.is_alive() + + def start(self): + print("Starting ClickHouse server") + Shell.check(f"rm {self.pid_file}") + + def run_clickhouse(): + self.proc = Shell.run_async( + self.command, verbose=True, suppress_output=False + ) + + thread = threading.Thread(target=run_clickhouse) + thread.daemon = True # Allow program to exit even if thread is still running + thread.start() + + started = False + try: + for _ in range(5): + pid = Shell.get_output(f"cat {self.pid_file}").strip() + if not pid: + Utils.sleep(1) + continue + started = True + print(f"Got pid from fs [{pid}]") + _ = int(pid) + break + except Exception: + pass + + if not started: + stdout = self.proc.stdout.read().strip() if self.proc.stdout else "" + stderr = self.proc.stderr.read().strip() if self.proc.stderr else "" + 
Utils.print_formatted_error("Failed to start ClickHouse", stdout, stderr) + return False + + print(f"ClickHouse server started successfully, pid [{pid}]") + return True + + def wait_ready(self): + res, out, err = 0, "", "" + attempts = 30 + delay = 2 + for attempt in range(attempts): + res, out, err = Shell.get_res_stdout_stderr( + 'clickhouse-client --query "select 1"', verbose=True + ) + if out.strip() == "1": + print("Server ready") + break + else: + print(f"Server not ready, wait") + Utils.sleep(delay) + else: + Utils.print_formatted_error( + f"Server not ready after [{attempts*delay}s]", out, err + ) + return False + return True + + def run_fast_test(self): + if Path(self.test_output_file).exists(): + Path(self.test_output_file).unlink() + exit_code = Shell.run(self.fast_test_command) + return exit_code == 0 + + def terminate(self): + print("Terminate ClickHouse process") + timeout = 10 + if self.proc: + Utils.terminate_process_group(self.proc.pid) + + self.proc.terminate() + try: + self.proc.wait(timeout=10) + print(f"Process {self.proc.pid} terminated gracefully.") + except Exception: + print( + f"Process {self.proc.pid} did not terminate in {timeout} seconds, killing it..." + ) + Utils.terminate_process_group(self.proc.pid, force=True) + self.proc.wait() # Wait for the process to be fully killed + print(f"Process {self.proc} was killed.") + + if self.minio_proc: + Utils.terminate_process_group(self.minio_proc.pid) diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py index aba3e4f7f5b..06989fb0a44 100755 --- a/ci/jobs/scripts/functional_tests_results.py +++ b/ci/jobs/scripts/functional_tests_results.py @@ -232,6 +232,8 @@ class FTResultsProcessor: else: pass + info = f"Total: {s.total - s.skipped}, Failed: {s.failed}" + # TODO: !!! 
# def test_result_comparator(item): # # sort by status then by check name @@ -253,6 +255,7 @@ class FTResultsProcessor: results=test_results, status=state, files=[self.tests_output_file], + info=info, with_info_from_results=False, ) diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py index 1777257f484..17da1519e37 100644 --- a/ci/praktika/_settings.py +++ b/ci/praktika/_settings.py @@ -80,6 +80,8 @@ class _Settings: CI_DB_TABLE_NAME = "" CI_DB_INSERT_TIMEOUT_SEC = 5 + DISABLE_MERGE_COMMIT = True + _USER_DEFINED_SETTINGS = [ "S3_ARTIFACT_PATH", @@ -112,6 +114,7 @@ _USER_DEFINED_SETTINGS = [ "SECRET_GH_APP_PEM_KEY", "SECRET_GH_APP_ID", "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", ] diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index f4bd4435511..cea84192d0d 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -11,50 +11,112 @@ from praktika.result import Result, ResultInfo from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Utils @dataclasses.dataclass class GitCommit: - date: str - message: str + # date: str + # message: str sha: str @staticmethod - def from_json(json_data: str) -> List["GitCommit"]: + def from_json(file) -> List["GitCommit"]: commits = [] + json_data = None try: - data = json.loads(json_data) - + with open(file, "r", encoding="utf-8") as f: + json_data = json.load(f) commits = [ GitCommit( - message=commit["messageHeadline"], - sha=commit["oid"], - date=commit["committedDate"], + # message=commit["messageHeadline"], + sha=commit["sha"], + # date=commit["committedDate"], ) - for commit in data.get("commits", []) + for commit in json_data ] except Exception as e: print( - f"ERROR: Failed to deserialize commit's data: [{json_data}], ex: [{e}]" + f"ERROR: Failed to deserialize commit's data [{json_data}], ex: [{e}]" ) return commits + @classmethod + def update_s3_data(cls): + env = _Environment.get() + sha = env.SHA + if not sha: + print("WARNING: Failed to retrieve commit sha") + return + commits = cls.pull_from_s3() + for commit in commits: + if sha == commit.sha: + print( + f"INFO: Sha already present in commits data [{sha}] - skip data update" + ) + return + commits.append(GitCommit(sha=sha)) + cls.push_to_s3(commits) + return + + @classmethod + def dump(cls, commits): + commits_ = [] + for commit in commits: + commits_.append(dataclasses.asdict(commit)) + with open(cls.file_name(), "w", encoding="utf8") as f: + json.dump(commits_, f) + + @classmethod + def pull_from_s3(cls): + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"WARNING: failed to cp file [{s3_path}] from s3") + return [] + return cls.from_json(local_path) + + @classmethod + def push_to_s3(cls, commits): + print(f"INFO: push commits data to s3, commits num [{len(commits)}]") + cls.dump(commits) + local_path = Path(cls.file_name()) + file_name = local_path.name + env = _Environment.get() + s3_path = f"{Settings.HTML_S3_PATH}/{cls.get_s3_prefix(pr_number=env.PR_NUMBER, branch=env.BRANCH)}/{file_name}" + if not S3.copy_file_to_s3(s3_path=s3_path, local_path=local_path, text=True): + print(f"WARNING: failed to cp file [{local_path}] to s3") + + @classmethod + def get_s3_prefix(cls, pr_number, branch): + prefix = 
"" + assert pr_number or branch + if pr_number and pr_number > 0: + prefix += f"{pr_number}" + else: + prefix += f"{branch}" + return prefix + + @classmethod + def file_name(cls): + return f"{Settings.TEMP_DIR}/commits.json" + + # def _get_pr_commits(pr_number): + # res = [] + # if not pr_number: + # return res + # output = Shell.get_output(f"gh pr view {pr_number} --json commits") + # if output: + # res = GitCommit.from_json(output) + # return res + class HtmlRunnerHooks: @classmethod def configure(cls, _workflow): - - def _get_pr_commits(pr_number): - res = [] - if not pr_number: - return res - output = Shell.get_output(f"gh pr view {pr_number} --json commits") - if output: - res = GitCommit.from_json(output) - return res - # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success @@ -106,11 +168,9 @@ class HtmlRunnerHooks: Utils.raise_with_error( "Failed to set both GH commit status and PR comment with Workflow Status, cannot proceed" ) - if env.PR_NUMBER: - commits = _get_pr_commits(env.PR_NUMBER) - # TODO: upload commits data to s3 to visualise it on a report page - print(commits) + # TODO: enable for branch, add commit number limiting + GitCommit.update_s3_data() @classmethod def pre_run(cls, _workflow, _job): diff --git a/ci/praktika/job.py b/ci/praktika/job.py index d0d4232cfa2..99eb08938b8 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -52,30 +52,57 @@ class Job: self, parameter: Optional[List[Any]] = None, runs_on: Optional[List[List[str]]] = None, + provides: Optional[List[List[str]]] = None, + requires: Optional[List[List[str]]] = None, timeout: Optional[List[int]] = None, ): assert ( parameter or runs_on ), "Either :parameter or :runs_on must be non empty list for parametrisation" + if runs_on: + assert isinstance(runs_on, list) and isinstance(runs_on[0], list) if not parameter: parameter = [None] * len(runs_on) if not runs_on: runs_on = [None] * len(parameter) if not timeout: timeout = [None] * len(parameter) + if not provides: + provides = [None] * len(parameter) + if not requires: + requires = [None] * len(parameter) assert ( - len(parameter) == len(runs_on) == len(timeout) - ), "Parametrization lists must be of the same size" + len(parameter) + == len(runs_on) + == len(timeout) + == len(provides) + == len(requires) + ), f"Parametrization lists must be of the same size [{len(parameter)}, {len(runs_on)}, {len(timeout)}, {len(provides)}, {len(requires)}]" res = [] - for parameter_, runs_on_, timeout_ in zip(parameter, runs_on, timeout): + for parameter_, runs_on_, timeout_, provides_, requires_ in zip( + parameter, runs_on, timeout, provides, requires + ): obj = copy.deepcopy(self) + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ if runs_on_: obj.runs_on = runs_on_ if timeout_: obj.timeout = timeout_ + if provides_: + assert ( + not obj.provides + ), "Job.Config.provides must be empty for parametrized jobs" + obj.provides = provides_ + if requires_: + assert ( + not obj.requires + ), "Job.Config.requires and parametrize(requires=...) 
are both set" + obj.requires = requires_ obj.name = obj.get_job_name_with_parameter() res.append(obj) return res @@ -84,13 +111,16 @@ class Job: name, parameter, runs_on = self.name, self.parameter, self.runs_on res = name name_params = [] - if isinstance(parameter, list) or isinstance(parameter, dict): - name_params.append(json.dumps(parameter)) - elif parameter is not None: - name_params.append(parameter) - if runs_on: + if parameter: + if isinstance(parameter, list) or isinstance(parameter, dict): + name_params.append(json.dumps(parameter)) + else: + name_params.append(parameter) + elif runs_on: assert isinstance(runs_on, list) name_params.append(json.dumps(runs_on)) + else: + assert False if name_params: name_params = [str(param) for param in name_params] res += f" ({', '.join(name_params)})" diff --git a/ci/praktika/json.html b/ci/praktika/json.html index af03ed702f8..f86a7b27ecb 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -200,10 +200,7 @@ } th.name-column, td.name-column { - max-width: 400px; /* Set the maximum width for the column */ - white-space: nowrap; /* Prevent text from wrapping */ - overflow: hidden; /* Hide the overflowed text */ - text-overflow: ellipsis; /* Show ellipsis (...) for overflowed text */ + min-width: 350px; } th.status-column, td.status-column { @@ -364,7 +361,6 @@ } function addKeyValueToStatus(key, value, options = null) { - const statusContainer = document.getElementById('status-container'); let keyValuePair = document.createElement('div'); @@ -374,27 +370,40 @@ keyElement.className = 'json-key'; keyElement.textContent = key + ':'; - let valueElement - if (value) { - valueElement = document.createElement('div'); - valueElement.className = 'json-value'; - valueElement.textContent = value; - } else if (options) { + let valueElement; + + if (options) { + // Create dropdown if options are provided valueElement = document.createElement('select'); valueElement.className = 'dropdown-value'; + + options.forEach(optionValue => { + const option = document.createElement('option'); + option.value = optionValue; + option.textContent = optionValue.slice(0, 10); + + // Set the initially selected option + if (optionValue === value) { + option.selected = true; + } + + valueElement.appendChild(option); + }); + + // Update the URL parameter when the selected value changes valueElement.addEventListener('change', (event) => { const selectedValue = event.target.value; updateUrlParameter(key, selectedValue); }); - options.forEach(optionValue => { - const option = document.createElement('option'); - option.value = optionValue; - option.textContent = optionValue; - valueElement.appendChild(option); - }); + } else { + // Create a simple text display if no options are provided + valueElement = document.createElement('div'); + valueElement.className = 'json-value'; + valueElement.textContent = value || 'N/A'; // Display 'N/A' if value is null } - keyValuePair.appendChild(keyElement) - keyValuePair.appendChild(valueElement) + + keyValuePair.appendChild(keyElement); + keyValuePair.appendChild(valueElement); statusContainer.appendChild(keyValuePair); } @@ -518,12 +527,12 @@ const columns = ['name', 'status', 'start_time', 'duration', 'info']; const columnSymbols = { - name: '📂', - status: '⏯️', + name: '🗂️', + status: '🧾', start_time: '🕒', duration: '⏳', - info: 'ℹ️', - files: '📄' + info: '📝', + files: '📎' }; function createResultsTable(results, nest_level) { @@ -532,16 +541,14 @@ const thead = document.createElement('thead'); const tbody = 
document.createElement('tbody'); - // Get the current URL parameters - const currentUrl = new URL(window.location.href); - // Create table headers based on the fixed columns const headerRow = document.createElement('tr'); columns.forEach(column => { const th = document.createElement('th'); - th.textContent = th.textContent = columnSymbols[column] || column; + th.textContent = columnSymbols[column] || column; th.style.cursor = 'pointer'; // Make headers clickable - th.addEventListener('click', () => sortTable(results, column, tbody, nest_level)); // Add click event to sort the table + th.setAttribute('data-sort-direction', 'asc'); // Default sort direction + th.addEventListener('click', () => sortTable(results, column, columnSymbols[column] || column, tbody, nest_level, columns)); // Add click event to sort the table headerRow.appendChild(th); }); thead.appendChild(headerRow); @@ -605,39 +612,33 @@ }); } - function sortTable(results, key, tbody, nest_level) { + function sortTable(results, column, key, tbody, nest_level, columns) { // Find the table header element for the given key - let th = null; - const tableHeaders = document.querySelectorAll('th'); // Select all table headers - tableHeaders.forEach(header => { - if (header.textContent.trim().toLowerCase() === key.toLowerCase()) { - th = header; - } - }); + const tableHeaders = document.querySelectorAll('th'); + let th = Array.from(tableHeaders).find(header => header.textContent === key); if (!th) { console.error(`No table header found for key: ${key}`); return; } - // Determine the current sort direction - let ascending = th.getAttribute('data-sort-direction') === 'asc' ? false : true; + const ascending = th.getAttribute('data-sort-direction') === 'asc'; + th.setAttribute('data-sort-direction', ascending ? 'desc' : 'asc'); - // Toggle the sort direction for the next click - th.setAttribute('data-sort-direction', ascending ? 'asc' : 'desc'); - - // Sort the results array by the given key results.sort((a, b) => { - if (a[key] < b[key]) return ascending ? -1 : 1; - if (a[key] > b[key]) return ascending ? 1 : -1; + if (a[column] < b[column]) return ascending ? -1 : 1; + if (a[column] > b[column]) return ascending ? 1 : -1; return 0; }); + // Clear the existing rows in tbody + tbody.innerHTML = ''; + // Re-populate the table with sorted data populateTableRows(tbody, results, columns, nest_level); } - function loadJSON(PR, sha, nameParams) { + function loadResultsJSON(PR, sha, nameParams) { const infoElement = document.getElementById('info-container'); let lastModifiedTime = null; const task = nameParams[0].toLowerCase(); @@ -753,22 +754,61 @@ } }); - if (PR) { - addKeyValueToStatus("PR", PR) - } else { - console.error("TODO") - } - addKeyValueToStatus("sha", null, [sha, 'lala']); - if (nameParams[1]) { - addKeyValueToStatus("job", nameParams[1]); - } - addKeyValueToStatus("workflow", nameParams[0]); + let path_commits_json = ''; + let commitsArray = []; - if (PR && sha && root_name) { - loadJSON(PR, sha, nameParams); + if (PR) { + addKeyValueToStatus("PR", PR); + const baseUrl = window.location.origin + window.location.pathname.replace('/json.html', ''); + path_commits_json = `${baseUrl}/${encodeURIComponent(PR)}/commits.json`; } else { - document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + // Placeholder for a different path when PR is missing + console.error("PR parameter is missing. 
Setting alternate commits path."); + path_commits_json = '/path/to/alternative/commits.json'; } + + function loadCommitsArray(path) { + return fetch(path, { cache: "no-cache" }) + .then(response => { + if (!response.ok) { + console.error(`HTTP error! status: ${response.status}`) + return []; + } + return response.json(); + }) + .then(data => { + if (Array.isArray(data) && data.every(item => typeof item === 'object' && item.hasOwnProperty('sha'))) { + return data.map(item => item.sha); + } else { + throw new Error('Invalid data format: expected array of objects with a "sha" key'); + } + }) + .catch(error => { + console.error('Error loading commits JSON:', error); + return []; // Return an empty array if an error occurs + }); + } + + loadCommitsArray(path_commits_json) + .then(data => { + commitsArray = data; + }) + .finally(() => { + // Proceed with the rest of the initialization + addKeyValueToStatus("sha", sha || "latest", commitsArray.concat(["latest"])); + + if (nameParams[1]) { + addKeyValueToStatus("job", nameParams[1]); + } + addKeyValueToStatus("workflow", nameParams[0]); + + // Check if all required parameters are present to load JSON + if (PR && sha && root_name) { + loadResultsJSON(PR, sha, nameParams); + } else { + document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; + } + }); } window.onload = init; diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index 89fc52cf849..bca33f9e660 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -58,7 +58,6 @@ def _update_workflow_artifacts(workflow): artifact_job = {} for job in workflow.jobs: for artifact_name in job.provides: - assert artifact_name not in artifact_job artifact_job[artifact_name] = job.name for artifact in workflow.artifacts: artifact._provided_by = artifact_job[artifact.name] diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index f7fd4ca190b..16ffa9056e9 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -151,7 +151,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): status = Result.Status.ERROR print("ERROR: ", info) else: - Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika --generate") + assert Shell.check(f"{Settings.PYTHON_INTERPRETER} -m praktika yaml") exit_code, output, err = Shell.get_res_stdout_stderr( f"git diff-index HEAD -- {Settings.WORKFLOW_PATH_PREFIX}" ) @@ -250,6 +250,9 @@ def _config_workflow(workflow: Workflow.Config, job_name): info_lines.append(job_name + ": " + info) results.append(result_) + if workflow.enable_merge_commit: + assert False, "NOT implemented" + # config: if workflow.dockers: print("Calculate docker's digests") diff --git a/ci/praktika/param.py b/ci/praktika/param.py new file mode 100644 index 00000000000..f5727198e0d --- /dev/null +++ b/ci/praktika/param.py @@ -0,0 +1,8 @@ +from praktika._environment import _Environment + + +# TODO: find better place and/or right storage for parameter +def get_param(): + env = _Environment.get() + assert env.PARAMETER + return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 2ba8309ad60..f473cf3ed05 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -1,7 +1,6 @@ import dataclasses import datetime import sys -from collections.abc import Container from pathlib import Path from typing import Any, Dict, List, Optional @@ -68,8 +67,9 @@ class Result(MetaClasses.Serializable): if isinstance(status, bool): status = Result.Status.SUCCESS if status else Result.Status.FAILED if 
not results and not status: - print("ERROR: Either .results or .status must be provided") - raise + Utils.raise_with_error( + f"Either .results ({results}) or .status ({status}) must be provided" + ) if not name: name = _Environment.get().JOB_NAME if not name: @@ -78,10 +78,10 @@ class Result(MetaClasses.Serializable): result_status = status or Result.Status.SUCCESS infos = [] if info: - if isinstance(info, Container): - infos += info + if isinstance(info, str): + infos += [info] else: - infos.append(info) + infos += info if results and not status: for result in results: if result.status not in (Result.Status.SUCCESS, Result.Status.FAILED): @@ -112,7 +112,7 @@ class Result(MetaClasses.Serializable): return self.status not in (Result.Status.PENDING, Result.Status.RUNNING) def is_running(self): - return self.status not in (Result.Status.RUNNING,) + return self.status in (Result.Status.RUNNING,) def is_ok(self): return self.status in (Result.Status.SKIPPED, Result.Status.SUCCESS) @@ -180,6 +180,11 @@ class Result(MetaClasses.Serializable): ) return self + def set_timing(self, stopwatch: Utils.Stopwatch): + self.start_time = stopwatch.start_time + self.duration = stopwatch.duration + return self + def update_sub_result(self, result: "Result"): assert self.results, "BUG?" for i, result_ in enumerate(self.results): diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 823c7e0f36d..5db1a89ce99 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -125,15 +125,24 @@ class Runner: return 0 def _run(self, workflow, job, docker="", no_docker=False, param=None): + # re-set envs for local run + env = _Environment.get() + env.JOB_NAME = job.name + env.PARAMETER = job.parameter + env.dump() + if param: if not isinstance(param, str): Utils.raise_with_error( f"Custom param for local tests must be of type str, got [{type(param)}]" ) - env = _Environment.get() - env.dump() if job.run_in_docker and not no_docker: + job.run_in_docker, docker_settings = ( + job.run_in_docker.split("+")[0], + job.run_in_docker.split("+")[1:], + ) + from_root = "root" in docker_settings if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -145,7 +154,7 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --user \"$(id -u):$(id -g)\" -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" else: cmd = job.command @@ -226,7 +235,8 @@ class Runner: print(info) result.set_info(info).set_status(Result.Status.ERROR).dump() - result.set_files(files=[Settings.RUN_LOG]) + if not result.is_ok(): + result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() if result.info and result.status != Result.Status.SUCCESS: @@ -329,7 +339,7 @@ class Runner: workflow, job, pr=pr, branch=branch, sha=sha ) - if res: + if res and (not local_run or pr or sha or branch): res = False print(f"=== Pre run script [{job.name}], workflow [{workflow.name}] ===") try: diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 8cfb70a9076..04a08622dcd 
100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -52,7 +52,7 @@ class S3: cmd += " --content-type text/plain" res = cls.run_command_with_retries(cmd) if not res: - raise + raise RuntimeError() bucket = s3_path.split("/")[0] endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] assert endpoint diff --git a/ci/praktika/workflow.py b/ci/praktika/workflow.py index 41e8056f9ef..8c5ec12440f 100644 --- a/ci/praktika/workflow.py +++ b/ci/praktika/workflow.py @@ -31,6 +31,7 @@ class Workflow: enable_report: bool = False enable_merge_ready_status: bool = False enable_cidb: bool = False + enable_merge_commit: bool = False def is_event_pull_request(self): return self.event == Workflow.Event.PULL_REQUEST diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index fb918b4ddba..1422a835663 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -80,6 +80,9 @@ jobs: steps: - name: Checkout code uses: actions/checkout@v4 + with: + clear-repository: true + ref: ${{{{ github.event.pull_reguest.head.sha }}}} {JOB_ADDONS} - name: Prepare env script run: | diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index c67bdee015b..33173756924 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,23 +8,30 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" + STYLE_CHECKER = "style-checker" BASE_BRANCH = "master" +azure_secret = Secret.Config( + name="azure_connection_string", + type=Secret.Type.AWS_SSM_VAR, +) + SECRETS = [ Secret.Config( name="dockerhub_robot_password", type=Secret.Type.AWS_SSM_VAR, ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-id", - type=Secret.Type.AWS_SSM_SECRET, - ), - Secret.Config( - name="woolenwolf_gh_app.clickhouse-app-key", - type=Secret.Type.AWS_SSM_SECRET, - ), + azure_secret, + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-id", + # type=Secret.Type.AWS_SSM_SECRET, + # ), + # Secret.Config( + # name="woolenwolf_gh_app.clickhouse-app-key", + # type=Secret.Type.AWS_SSM_SECRET, + # ), ] DOCKERS = [ @@ -118,12 +125,12 @@ DOCKERS = [ # platforms=Docker.Platforms.arm_amd, # depends_on=["clickhouse/test-base"], # ), - # Docker.Config( - # name="clickhouse/stateless-test", - # path="./ci/docker/test/stateless", - # platforms=Docker.Platforms.arm_amd, - # depends_on=["clickhouse/test-base"], - # ), + Docker.Config( + name="clickhouse/stateless-test", + path="./ci/docker/stateless-test", + platforms=Docker.Platforms.arm_amd, + depends_on=[], + ), # Docker.Config( # name="clickhouse/stateful-test", # path="./ci/docker/test/stateful", @@ -230,5 +237,6 @@ DOCKERS = [ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" - BUILD_AMD_DEBUG = "Build amd64 debug" + BUILD = "Build" + BUILD_AMD_DEBUG = "Build (amd, debug)" STATELESS_TESTS = "Stateless tests (amd, debug)" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index c7715b40fca..10dd77a0414 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -13,7 +13,8 @@ from ci.settings.definitions import ( class ArtifactNames: - ch_debug_binary = "clickhouse_debug_binary" + CH_AMD_DEBUG = "CH_AMD_DEBUG" + CH_AMD_RELEASE = "CH_AMD_RELEASE" style_check_job = Job.Config( @@ -37,10 +38,10 @@ fast_test_job = Job.Config( ), ) -job_build_amd_debug = Job.Config( - name=JobNames.BUILD_AMD_DEBUG, +amd_build_jobs = Job.Config( + name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 
./ci/jobs/build_clickhouse.py amd_debug", + command="python3 ./ci/jobs/build_clickhouse.py", run_in_docker="clickhouse/fasttest", digest_config=Job.CacheDigestConfig( include_paths=[ @@ -56,20 +57,30 @@ job_build_amd_debug = Job.Config( "./tests/ci/version_helper.py", ], ), - provides=[ArtifactNames.ch_debug_binary], +).parametrize( + parameter=["amd_debug", "amd_release"], + provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -stateless_tests_job = Job.Config( +statless_batch_num = 2 +stateless_tests_amd_debug_jobs = Job.Config( name=JobNames.STATELESS_TESTS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/fasttest:latest", + run_in_docker="clickhouse/stateless-test", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.ch_debug_binary], + requires=[ArtifactNames.CH_AMD_DEBUG], +).parametrize( + parameter=[ + f"parallel {i+1}/{statless_batch_num}" for i in range(statless_batch_num) + ] + + ["non-parallel"], + runs_on=[[RunnerLabels.BUILDER] for _ in range(statless_batch_num)] + + [[RunnerLabels.STYLE_CHECKER]], ) workflow = Workflow.Config( @@ -79,15 +90,20 @@ workflow = Workflow.Config( jobs=[ style_check_job, fast_test_job, - job_build_amd_debug, - stateless_tests_job, + *amd_build_jobs, + *stateless_tests_amd_debug_jobs, ], artifacts=[ Artifact.Config( - name=ArtifactNames.ch_debug_binary, + name=ArtifactNames.CH_AMD_DEBUG, type=Artifact.Type.S3, path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", - ) + ), + Artifact.Config( + name=ArtifactNames.CH_AMD_RELEASE, + type=Artifact.Type.S3, + path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", + ), ], dockers=DOCKERS, secrets=SECRETS, @@ -101,8 +117,11 @@ WORKFLOWS = [ ] # type: List[Workflow.Config] -if __name__ == "__main__": - # local job test inside praktika environment - from praktika.runner import Runner - - Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) +# if __name__ == "__main__": +# # local job test inside praktika environment +# from praktika.runner import Runner +# from praktika.digest import Digest +# +# print(Digest().calc_job_digest(amd_debug_build_job)) +# +# Runner().run(workflow, fast_test_job, docker="fasttest", local_run=True) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 100a6358dcf..3396b10814a 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2153,9 +2153,9 @@ class TestSuite: self.sequential_tests = [] self.parallel_tests = [] for test_name in self.all_tests: - if self.is_sequential_test(test_name): + if self.is_sequential_test(test_name) and not args.no_sequential: self.sequential_tests.append(test_name) - else: + elif not args.no_parallel: self.parallel_tests.append(test_name) def is_sequential_test(self, test_name): @@ -3290,7 +3290,10 @@ def parse_args(): help='Replace random database name with "default" in stderr', ) parser.add_argument( - "--parallel", default="1/1", help="One parallel test run number/total" + "--no-sequential", action="store_true", help="Not run no-parallel" + ) + parser.add_argument( + "--no-parallel", action="store_true", help="Run only no-parallel" ) parser.add_argument( "-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel" @@ -3339,7 +3342,7 @@ def parse_args(): parser.add_argument( "--sequential", nargs="+", - help="Run these tests sequentially even if --parallel specified", + help="Run all tests sequentially", ) 
parser.add_argument( "--no-long", action="store_true", dest="no_long", help="Do not run long tests" diff --git a/tests/config/config.d/ssl_certs.xml b/tests/config/config.d/ssl_certs.xml index 26b679f39df..c20fef89e00 100644 --- a/tests/config/config.d/ssl_certs.xml +++ b/tests/config/config.d/ssl_certs.xml @@ -1,8 +1,8 @@ - /etc/clickhouse-server/server.crt - /etc/clickhouse-server/server.key + /tmp/praktika/etc/clickhouse-server/server.crt + /tmp/praktika/etc/clickhouse-server/server.key diff --git a/tests/config/install.sh b/tests/config/install.sh index be47298f6a4..cdae5741fce 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -9,6 +9,21 @@ DEST_SERVER_PATH="${1:-/etc/clickhouse-server}" DEST_CLIENT_PATH="${2:-/etc/clickhouse-client}" SRC_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +shift # DEST_SERVER_PATH +shift # DEST_CLIENT_PATH + +FAST_TEST=0 +S3_STORAGE=0 + +while [[ "$#" -gt 0 ]]; do + case $1 in + --fast-test) FAST_TEST=1 ;; + --s3-storage) S3_STORAGE=1 ;; + *) echo "Unknown option: $1" ; exit 1 ;; + esac + shift +done + echo "Going to install test configs from $SRC_PATH into $DEST_SERVER_PATH" mkdir -p $DEST_SERVER_PATH/config.d/ @@ -72,9 +87,8 @@ ln -sf $SRC_PATH/config.d/serverwide_trace_collector.xml $DEST_SERVER_PATH/confi ln -sf $SRC_PATH/config.d/rocksdb.xml $DEST_SERVER_PATH/config.d/ # Not supported with fasttest. -if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] -then - ln -sf $SRC_PATH/config.d/legacy_geobase.xml $DEST_SERVER_PATH/config.d/ +if [ "${DEST_SERVER_PATH}" = "/etc/clickhouse-server" ] || [ "$FAST_TEST" != "1" ]; then + ln -sf "$SRC_PATH/config.d/legacy_geobase.xml" "$DEST_SERVER_PATH/config.d/" fi ln -sf $SRC_PATH/users.d/log_queries.xml $DEST_SERVER_PATH/users.d/ @@ -185,7 +199,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/ fi -if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then +if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ @@ -195,7 +209,7 @@ if [[ -n "$EXPORT_S3_STORAGE_POLICIES" ]]; then ln -sf $SRC_PATH/users.d/s3_cache_new.xml $DEST_SERVER_PATH/users.d/ fi -if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then +if [[ "$USE_DATABASE_REPLICATED" == "1" ]]; then ln -sf $SRC_PATH/users.d/database_replicated.xml $DEST_SERVER_PATH/users.d/ ln -sf $SRC_PATH/config.d/database_replicated.xml $DEST_SERVER_PATH/config.d/ rm /etc/clickhouse-server/config.d/zookeeper.xml diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 40e93e713a1..837c05a9c5d 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -5,6 +5,12 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +if [ -d "$TEMP_DIR" ]; then + cd "$TEMP_DIR" + # add / for minio mc in docker + PATH="/:.:$PATH" +fi + usage() { echo $"Usage: $0 (default path: /usr/share/clickhouse-test)" exit 1 @@ -70,9 +76,10 @@ download_minio() { } start_minio() { + pwd mkdir -p ./minio_data - ./minio --version - ./minio server --address ":11111" ./minio_data & + minio --version + minio server --address ":11111" ./minio_data & 
wait_for_it lsof -i :11111 sleep 5 @@ -80,12 +87,14 @@ start_minio() { setup_minio() { local test_type=$1 - ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse - ./mc admin user add clickminio test testtest - ./mc admin policy attach clickminio readwrite --user=test - ./mc mb --ignore-existing clickminio/test + echo "setup_minio(), test_type=$test_type" + mc alias set clickminio http://localhost:11111 clickhouse clickhouse + mc admin user add clickminio test testtest + mc admin policy attach clickminio readwrite --user=test ||: + mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - ./mc anonymous set public clickminio/test + echo "Create @test bucket in minio" + mc anonymous set public clickminio/test fi } @@ -95,12 +104,13 @@ upload_data() { local query_dir=$1 local test_path=$2 local data_path=${test_path}/queries/${query_dir}/data_minio + echo "upload_data() data_path=$data_path" # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 if [ -d "${data_path}" ]; then - ./mc cp --recursive "${data_path}"/ clickminio/test/ + mc cp --recursive "${data_path}"/ clickminio/test/ fi } @@ -138,7 +148,7 @@ wait_for_it() { main() { local query_dir query_dir=$(check_arg "$@") - if [ ! -f ./minio ]; then + if ! (minio --version && mc --version); then download_minio fi start_minio From 98ee0893318bcfd4e0d63b564f513b37579bd3c8 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 5 Nov 2024 17:31:47 +0000 Subject: [PATCH 40/74] Cleanup --- tests/queries/0_stateless/03261_pr_semi_anti_join.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql index d2ea3725d6b..2d671756d6e 100644 --- a/tests/queries/0_stateless/03261_pr_semi_anti_join.sql +++ b/tests/queries/0_stateless/03261_pr_semi_anti_join.sql @@ -1,6 +1,5 @@ DROP TABLE IF EXISTS t1 SYNC; DROP TABLE IF EXISTS t2 SYNC; -create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); From c1345d38c8e987838704a4ae7da6cb05af8257c2 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 6 Nov 2024 15:44:22 +0000 Subject: [PATCH 41/74] Fix flakiness in 03254_pr_join_on_dups --- src/Interpreters/IJoin.h | 1 - src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp | 1 - tests/queries/0_stateless/03254_pr_join_on_dups.sql | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Interpreters/IJoin.h b/src/Interpreters/IJoin.h index 8f648de2538..5a83137ca2a 100644 --- a/src/Interpreters/IJoin.h +++ b/src/Interpreters/IJoin.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include diff --git a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp index c0b31864eac..35d340b4bbf 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp @@ -16,7 +16,6 @@ #include #include -#include namespace DB::QueryPlanOptimizations { diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 222f7693090..166910d496f 100644 --- 
a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -10,6 +10,7 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From a828e3e923ef06666d4582c34868750bbbee3e6a Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Tue, 5 Nov 2024 12:59:14 +0000 Subject: [PATCH 42/74] test --- .github/workflows/pr.yaml | 108 ++++++++++++++++---------- ci/jobs/build_clickhouse.py | 30 +++---- ci/jobs/fast_test.py | 2 + ci/jobs/functional_stateless_tests.py | 92 +++++++++++++--------- ci/jobs/scripts/clickhouse_proc.py | 39 +++------- ci/praktika/_environment.py | 12 +-- ci/praktika/cidb.py | 2 +- ci/praktika/digest.py | 20 ++--- ci/praktika/environment.py | 3 - ci/praktika/hook_html.py | 20 ++--- ci/praktika/job.py | 1 + ci/praktika/json.html | 22 +++--- ci/praktika/mangle.py | 55 +++++++------ ci/praktika/native_jobs.py | 6 +- ci/praktika/param.py | 8 -- ci/praktika/result.py | 6 -- ci/praktika/runner.py | 7 +- ci/praktika/utils.py | 27 +++---- ci/praktika/validator.py | 89 ++++++++++----------- ci/praktika/yaml_generator.py | 11 +-- ci/settings/definitions.py | 5 +- ci/workflows/pull_request.py | 54 +++++++++---- tests/clickhouse-test | 30 +++---- tests/config/install.sh | 2 +- tests/docker_scripts/setup_minio.sh | 6 +- 25 files changed, 334 insertions(+), 323 deletions(-) delete mode 100644 ci/praktika/environment.py delete mode 100644 ci/praktika/param.py diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 0c3f74aeac8..51bb9b52d10 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -31,8 +31,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -72,8 +71,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -113,8 +111,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -154,8 +151,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -195,8 +191,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -236,8 +231,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -266,19 +260,18 @@ jobs: python3 -m praktika run --job '''Build 
(amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_1_2: + stateless_tests_amd_debugparallel: runs-on: [builder] needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMS8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 1/2)" + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcscGFyYWxsZWwp') }} + name: "Stateless tests (amd_debug,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -302,24 +295,63 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 1/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_parallel_2_2: + stateless_tests_amd_debugnon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_debug] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsbm9uLXBhcmFsbGVsKQ==') }} + name: "Stateless tests (amd_debug,non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_amd_releaseparallel: runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAocGFyYWxsZWwgMi8yKQ==') }} - name: "Stateless tests (amd, debug) (parallel 2/2)" + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxwYXJhbGxlbCk=') }} + name: "Stateless tests (amd_release,parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -343,24 +375,23 @@ jobs: . /tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (parallel 2/2)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi - stateless_tests_amd_debug_non_parallel: - runs-on: [style-checker] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWQsIGRlYnVnKSAobm9uLXBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd, debug) (non-parallel)" + stateless_tests_amd_releasenon_parallel: + runs-on: [func-tester] + needs: [config_workflow, docker_builds, build_amd_release] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxub24tcGFyYWxsZWwp') }} + name: "Stateless tests (amd_release,non-parallel)" outputs: data: ${{ steps.run.outputs.DATA }} steps: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | @@ -384,14 +415,14 @@ jobs: . 
/tmp/praktika_setup_env.sh set -o pipefail if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log else - python3 -m praktika run --job '''Stateless tests (amd, debug) (non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debug_parallel_1_2, stateless_tests_amd_debug_parallel_2_2, stateless_tests_amd_debug_non_parallel] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: @@ -400,8 +431,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{ github.event.pull_reguest.head.sha }} + ref: ${{ github.head_ref }} - name: Prepare env script run: | diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 3db88938f23..1e6d2c648a7 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -1,6 +1,5 @@ import argparse -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils @@ -16,8 +15,7 @@ def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( "--build-type", - help="Type: __", - default=None, + help="Type: ,,", ) parser.add_argument( "--param", @@ -30,7 +28,7 @@ def parse_args(): CMAKE_CMD = """cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA \ -DCMAKE_BUILD_TYPE={BUILD_TYPE} \ -DSANITIZE={SANITIZER} \ --DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DENABLE_TESTS=0 \ +-DENABLE_CHECK_HEAVY_BUILDS=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 \ -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr \ -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON \ {AUX_DEFS} \ @@ -54,33 +52,26 @@ def main(): stages.pop(0) stages.insert(0, stage) - cmake_build_type = "Release" - sanitizer = "" - - if args.build_type and get_param(): - assert ( - False - ), "Build type must provided via job parameter (CI case) or via --build-type input argument not both" - - build_type = args.build_type or get_param() + build_type = args.build_type assert ( build_type ), "build_type must be provided either as input argument or as a parameter of parametrized job in CI" build_type = build_type.lower() - # if Environment.is_local_run(): - # build_cache_type = "disabled" - # else: CACHE_TYPE = "sccache" if "debug" in build_type: print("Build type set: debug") BUILD_TYPE = "Debug" - AUX_DEFS = " -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + AUX_DEFS = ( + " -DENABLE_TESTS=1 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + ) elif "release" in build_type: print("Build type set: 
release") - BUILD_TYPE = "None" - AUX_DEFS = " -DENABLE_TESTS=1 " + BUILD_TYPE = "RelWithDebInfo" + AUX_DEFS = " -DENABLE_TESTS=0 " + else: + assert False if "asan" in build_type: print("Sanitizer set: address") @@ -136,6 +127,7 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() + Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py index cb7d925fead..03a4c0cd496 100644 --- a/ci/jobs/fast_test.py +++ b/ci/jobs/fast_test.py @@ -215,11 +215,13 @@ def main(): ) if res and JobStages.TEST in stages: + stop_watch_ = Utils.Stopwatch() step_name = "Tests" print(step_name) res = res and CH.run_fast_test() if res: results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) + results[-1].set_timing(stopwatch=stop_watch_) CH.terminate() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index d77522ed73a..0481086d80a 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,15 +1,13 @@ import argparse -import os +import time from pathlib import Path -from praktika.param import get_param from praktika.result import Result from praktika.settings import Settings from praktika.utils import MetaClasses, Shell, Utils from ci.jobs.scripts.clickhouse_proc import ClickHouseProc from ci.jobs.scripts.functional_tests_results import FTResultsProcessor -from ci.settings.definitions import azure_secret class JobStages(metaclass=MetaClasses.WithIter): @@ -21,9 +19,14 @@ class JobStages(metaclass=MetaClasses.WithIter): def parse_args(): parser = argparse.ArgumentParser(description="ClickHouse Build Job") parser.add_argument( - "BUILD_TYPE", help="Type: __" + "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}" ) - parser.add_argument("--param", help="Optional custom job start stage", default=None) + parser.add_argument( + "--test-options", + help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BTATCH_TOT|..", + default="", + ) + parser.add_argument("--param", help="Optional job start stage", default=None) return parser.parse_args() @@ -50,28 +53,31 @@ def run_stateless_test( def main(): args = parse_args() - params = get_param().split(" ") - parallel_or_sequential = None - no_parallel = False - no_sequential = False - if params: - parallel_or_sequential = params[0] - if len(params) > 1: - batch_num, total_batches = map(int, params[1].split("/")) - else: - batch_num, total_batches = 0, 0 - if parallel_or_sequential: - no_parallel = parallel_or_sequential == "non-parallel" - no_sequential = parallel_or_sequential == "parallel" + test_options = args.test_options.split(",") + no_parallel = "non-parallel" in test_options + no_sequential = "parallel" in test_options + batch_num, total_batches = 0, 0 + for to in test_options: + if "/" in to: + batch_num, total_batches = map(int, to.split("/")) - os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( - f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", - verbose=True, - ) + # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output( + # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value", + # verbose=True, + # strict=True + # ) + + ch_path = args.ch_path + assert Path( + ch_path + "/clickhouse" + ).is_file(), f"clickhouse binary not found under [{ch_path}]" stop_watch = 
Utils.Stopwatch() stages = list(JobStages) + + logs_to_attach = [] + stage = args.param or JobStages.INSTALL_CLICKHOUSE if stage: assert stage in JobStages, f"--param must be one of [{list(JobStages)}]" @@ -83,19 +89,22 @@ def main(): res = True results = [] - Utils.add_to_PATH(f"{Settings.INPUT_DIR}:tests") + Utils.add_to_PATH(f"{ch_path}:tests") if res and JobStages.INSTALL_CLICKHOUSE in stages: commands = [ - f"chmod +x {Settings.INPUT_DIR}/clickhouse", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-server", - f"ln -sf {Settings.INPUT_DIR}/clickhouse {Settings.INPUT_DIR}/clickhouse-client", + f"chmod +x {ch_path}/clickhouse", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server", + f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client", f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", # update_path_ch_config, - f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", - f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", + # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done", + f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|local_disk|{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done", f"clickhouse-server --version", ] results.append( @@ -110,22 +119,27 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) - res = res and CH.start_minio() + minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_minio(log_file_path=minio_log) + logs_to_attach += [minio_log] + time.sleep(10) + Shell.check("ps -ef | grep minio", verbose=True) + res = res and Shell.check( + "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True + ) res = res and CH.start() res = res and CH.wait_ready() + if res: + print("ch started") + logs_to_attach += [ + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", + "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", + ] results.append( Result.create_from( name=step_name, status=res, stopwatch=stop_watch_, - files=( - [ - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log", - "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log", - ] - if not res - else [] - ), ) ) res = results[-1].is_ok() @@ -144,7 +158,9 @@ def main(): results[-1].set_timing(stopwatch=stop_watch_) res = results[-1].is_ok() - 
Result.create_from(results=results, stopwatch=stop_watch).complete_job() + Result.create_from( + results=results, stopwatch=stop_watch, files=logs_to_attach if not res else [] + ).complete_job() if __name__ == "__main__": diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index cc822eab693..c43283e75e0 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -1,5 +1,4 @@ -import threading -import time +import subprocess from pathlib import Path from praktika.settings import Settings @@ -39,39 +38,25 @@ class ClickHouseProc: Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path) Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas") - if not fast_test: - with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: - file.write(self.BACKUPS_XML) + # if not fast_test: + # with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file: + # file.write(self.BACKUPS_XML) self.minio_proc = None - def start_minio(self): - print("Starting minio") - - def run_minio(): - self.minio_proc = Shell.run_async( - self.minio_cmd, verbose=True, suppress_output=True + def start_minio(self, log_file_path): + command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT ) - - thread = threading.Thread(target=run_minio) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - time.sleep(5) - return thread.is_alive() + print(f"Started setup_minio.sh asynchronously with PID {process.pid}") + return True def start(self): print("Starting ClickHouse server") Shell.check(f"rm {self.pid_file}") - - def run_clickhouse(): - self.proc = Shell.run_async( - self.command, verbose=True, suppress_output=False - ) - - thread = threading.Thread(target=run_clickhouse) - thread.daemon = True # Allow program to exit even if thread is still running - thread.start() - + self.proc = subprocess.Popen(self.command, stderr=subprocess.STDOUT, shell=True) started = False try: for _ in range(5): diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 4ac8ad319f9..1c6b547ddde 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -30,7 +30,6 @@ class _Environment(MetaClasses.Serializable): INSTANCE_ID: str INSTANCE_LIFE_CYCLE: str LOCAL_RUN: bool = False - PARAMETER: Any = None REPORT_INFO: List[str] = dataclasses.field(default_factory=list) name = "environment" @@ -172,18 +171,15 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. 
This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self): + def get_report_url(self, settings): import urllib - from praktika.settings import Settings - from praktika.utils import Utils - - path = Settings.HTML_S3_PATH - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): + path = settings.HTML_S3_PATH + for bucket, endpoint in settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(Settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/cidb.py b/ci/praktika/cidb.py index 087845ec762..53088c102cd 100644 --- a/ci/praktika/cidb.py +++ b/ci/praktika/cidb.py @@ -52,7 +52,7 @@ class CIDB: check_status=result.status, check_duration_ms=int(result.duration * 1000), check_start_time=Utils.timestamp_to_str(result.start_time), - report_url=env.get_report_url(), + report_url=env.get_report_url(settings=Settings), pull_request_url=env.CHANGE_URL, base_ref=env.BASE_BRANCH, base_repo=env.REPOSITORY, diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index 93b62b13dc0..a1f2eecf9b6 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -31,6 +31,9 @@ class Digest: cache_key = self._hash_digest_config(config) if cache_key in self.digest_cache: + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" + ) return self.digest_cache[cache_key] included_files = Utils.traverse_paths( @@ -38,12 +41,9 @@ class Digest: job_config.digest_config.exclude_paths, sorted=True, ) - print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" ) - # Sort files to ensure consistent hash calculation - included_files.sort() # Calculate MD5 hash res = "" @@ -52,11 +52,11 @@ class Digest: print(f"NOTE: empty digest config [{config}] - return dummy digest") else: hash_md5 = hashlib.md5() - for file_path in included_files: - res = self._calc_file_digest(file_path, hash_md5) - assert res - self.digest_cache[cache_key] = res - return res + for i, file_path in enumerate(included_files): + hash_md5 = self._calc_file_digest(file_path, hash_md5) + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + return digest def calc_docker_digest( self, @@ -103,10 +103,10 @@ class Digest: print( f"WARNING: No valid file resolved by link {file_path} -> {resolved_path} - skipping digest calculation" ) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 with open(resolved_path, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): hash_md5.update(chunk) - return hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + return hash_md5 diff --git a/ci/praktika/environment.py b/ci/praktika/environment.py deleted file mode 100644 index 8f53aa6230b..00000000000 --- a/ci/praktika/environment.py +++ /dev/null @@ -1,3 +0,0 @@ -from praktika._environment import _Environment - -Environment = _Environment.get() diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index 
cea84192d0d..ca2692d1b22 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -1,6 +1,5 @@ import dataclasses import json -import urllib.parse from pathlib import Path from typing import List @@ -132,17 +131,9 @@ class HtmlRunnerHooks: result = Result.generate_skipped(job.name) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) - summary_result.aux_links.append(env.CHANGE_URL) - summary_result.aux_links.append(env.RUN_URL) + summary_result.links.append(env.CHANGE_URL) + summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - page_url = "/".join( - ["https:/", Settings.HTML_S3_PATH, str(Path(Settings.HTML_PAGE_FILE).name)] - ) - for bucket, endpoint in Settings.S3_BUCKET_TO_HTTP_ENDPOINT.items(): - page_url = page_url.replace(bucket, endpoint) - # TODO: add support for non-PRs (use branch?) - page_url += f"?PR={env.PR_NUMBER}&sha=latest&name_0={urllib.parse.quote(env.WORKFLOW_NAME, safe='')}" - summary_result.html_link = page_url # clean the previous latest results in PR if any if env.PR_NUMBER: @@ -152,13 +143,14 @@ class HtmlRunnerHooks: unlock=False, ) + page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") res1 = GH.post_commit_status( name=_workflow.name, status=Result.Status.PENDING, description="", - url=page_url, + url=env.get_report_url(settings=Settings), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -248,11 +240,11 @@ class HtmlRunnerHooks: ) if workflow_result.status != old_status: print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}], link [{workflow_result.html_link}]" + f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" ) GH.post_commit_status( name=workflow_result.name, status=GH.convert_to_gh_status(workflow_result.status), description="", - url=workflow_result.html_link, + url=env.get_report_url(settings=Settings), ) diff --git a/ci/praktika/job.py b/ci/praktika/job.py index 99eb08938b8..595a86456e9 100644 --- a/ci/praktika/job.py +++ b/ci/praktika/job.py @@ -89,6 +89,7 @@ class Job: ), "Job.Config.provides must be empty for parametrized jobs" if parameter_: obj.parameter = parameter_ + obj.command = obj.command.format(PARAMETER=parameter_) if runs_on_: obj.runs_on = runs_on_ if timeout_: diff --git a/ci/praktika/json.html b/ci/praktika/json.html index f86a7b27ecb..4e15a67ba76 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -663,20 +663,20 @@ let targetData = navigatePath(data, nameParams); let nest_level = nameParams.length; + // Add footer links from top-level Result + if (Array.isArray(data.links) && data.links.length > 0) { + data.links.forEach(link => { + const a = document.createElement('a'); + a.href = link; + a.textContent = link.split('/').pop(); + a.target = '_blank'; + footerRight.appendChild(a); + }); + } + if (targetData) { infoElement.style.display = 'none'; - // Handle footer links if present - if (Array.isArray(data.aux_links) && data.aux_links.length > 0) { - data.aux_links.forEach(link => { - const a = document.createElement('a'); - a.href = link; - a.textContent = link.split('/').pop(); - a.target = '_blank'; - footerRight.appendChild(a); - }); - } - addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) // Handle links diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index bca33f9e660..b16d52fbbbf 100644 --- 
a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -14,35 +14,34 @@ def _get_workflows(name=None, file=None): """ res = [] - with ContextManager.cd(): - directory = Path(_Settings.WORKFLOWS_DIRECTORY) - for py_file in directory.glob("*.py"): - if file and file not in str(py_file): - continue - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - try: - for workflow in foo.WORKFLOWS: - if name: - if name == workflow.name: - print(f"Read workflow [{name}] config from [{module_name}]") - res = [workflow] - break - else: - continue + directory = Path(_Settings.WORKFLOWS_DIRECTORY) + for py_file in directory.glob("*.py"): + if file and file not in str(py_file): + continue + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + try: + for workflow in foo.WORKFLOWS: + if name: + if name == workflow.name: + print(f"Read workflow [{name}] config from [{module_name}]") + res = [workflow] + break else: - res += foo.WORKFLOWS - print(f"Read workflow configs from [{module_name}]") - except Exception as e: - print( - f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" - ) + continue + else: + res += foo.WORKFLOWS + print(f"Read workflow configs from [{module_name}]") + except Exception as e: + print( + f"WARNING: Failed to add WORKFLOWS config from [{module_name}], exception [{e}]" + ) if not res: Utils.raise_with_error(f"Failed to find workflow [{name or file}]") diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 16ffa9056e9..58af211988b 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -342,7 +342,7 @@ def _finish_workflow(workflow, job_name): f"NOTE: Result for [{result.name}] has not ok status [{result.status}]" ) ready_for_merge_status = Result.Status.FAILED - failed_results.append(result.name.split("(", maxsplit=1)[0]) # cut name + failed_results.append(result.name) if failed_results: ready_for_merge_description = f"failed: {', '.join(failed_results)}" @@ -362,9 +362,7 @@ def _finish_workflow(workflow, job_name): unlock=False, ) # no lock - no unlock - Result.from_fs(job_name).set_status(Result.Status.SUCCESS).set_info( - ready_for_merge_description - ) + Result.from_fs(job_name).set_status(Result.Status.SUCCESS) if __name__ == "__main__": diff --git a/ci/praktika/param.py b/ci/praktika/param.py deleted file mode 100644 index f5727198e0d..00000000000 --- a/ci/praktika/param.py +++ /dev/null @@ -1,8 +0,0 @@ -from praktika._environment import _Environment - - -# TODO: find better place and/or right storage for parameter -def get_param(): - env = _Environment.get() - assert env.PARAMETER - return env.PARAMETER diff --git a/ci/praktika/result.py b/ci/praktika/result.py index f473cf3ed05..842deacbcbd 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -26,10 +26,6 @@ class Result(MetaClasses.Serializable): files (List[str]): A list of file paths or names related to the result. links (List[str]): A list of URLs related to the result (e.g., links to reports or resources). info (str): Additional information about the result. Free-form text. 
- # TODO: rename - aux_links (List[str]): A list of auxiliary links that provide additional context for the result. - # TODO: remove - html_link (str): A direct link to an HTML representation of the result (e.g., a detailed report page). Inner Class: Status: Defines possible statuses for the task, such as "success", "failure", etc. @@ -51,8 +47,6 @@ class Result(MetaClasses.Serializable): files: List[str] = dataclasses.field(default_factory=list) links: List[str] = dataclasses.field(default_factory=list) info: str = "" - aux_links: List[str] = dataclasses.field(default_factory=list) - html_link: str = "" @staticmethod def create_from( diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 5db1a89ce99..1ac8748d1c0 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -80,7 +80,6 @@ class Runner: print("Read GH Environment") env = _Environment.from_env() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() print(env) @@ -128,7 +127,6 @@ class Runner: # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name - env.PARAMETER = job.parameter env.dump() if param: @@ -143,6 +141,7 @@ class Runner: job.run_in_docker.split("+")[1:], ) from_root = "root" in docker_settings + settings = [s for s in docker_settings if s.startswith("--")] if ":" in job.run_in_docker: docker_name, docker_tag = job.run_in_docker.split(":") print( @@ -154,9 +153,11 @@ class Runner: RunConfig.from_fs(workflow.name).digest_dockers[job.run_in_docker], ) docker = docker or f"{docker_name}:{docker_tag}" - cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {docker} {job.command}" + cmd = f"docker run --rm --name praktika {'--user $(id -u):$(id -g)' if not from_root else ''} -e PYTHONPATH='{Settings.DOCKER_WD}:{Settings.DOCKER_WD}/ci' --volume ./:{Settings.DOCKER_WD} --volume {Settings.TEMP_DIR}:{Settings.TEMP_DIR} --workdir={Settings.DOCKER_WD} {' '.join(settings)} {docker} {job.command}" else: cmd = job.command + python_path = os.getenv("PYTHONPATH", ":") + os.environ["PYTHONPATH"] = f".:{python_path}" if param: print(f"Custom --param [{param}] will be passed to job's script") diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index b96c78e4fa7..62eb13b3e19 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -81,25 +81,26 @@ class MetaClasses: class ContextManager: @staticmethod @contextmanager - def cd(to: Optional[Union[Path, str]] = None) -> Iterator[None]: + def cd(to: Optional[Union[Path, str]]) -> Iterator[None]: """ changes current working directory to @path or `git root` if @path is None :param to: :return: """ - if not to: - try: - to = Shell.get_output_or_raise("git rev-parse --show-toplevel") - except: - pass - if not to: - if Path(_Settings.DOCKER_WD).is_dir(): - to = _Settings.DOCKER_WD - if not to: - assert False, "FIX IT" - assert to + # if not to: + # try: + # to = Shell.get_output_or_raise("git rev-parse --show-toplevel") + # except: + # pass + # if not to: + # if Path(_Settings.DOCKER_WD).is_dir(): + # to = _Settings.DOCKER_WD + # if not to: + # assert False, "FIX IT" + # assert to old_pwd = os.getcwd() - os.chdir(to) + if to: + os.chdir(to) try: yield finally: diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index 29edc0a27ed..d612881b819 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py 
@@ -119,61 +119,58 @@ class Validator: def validate_file_paths_in_run_command(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - run_command = job.command - command_parts = run_command.split(" ") - for part in command_parts: - if ">" in part: - return - if "/" in part: - assert ( - Path(part).is_file() or Path(part).is_dir() - ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + run_command = job.command + command_parts = run_command.split(" ") + for part in command_parts: + if ">" in part: + return + if "/" in part: + assert ( + Path(part).is_file() or Path(part).is_dir() + ), f"Apparently run command [{run_command}] for job [{job}] has invalid path [{part}]. Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_file_paths_in_digest_configs(cls, workflow: Workflow.Config) -> None: if not Settings.VALIDATE_FILE_PATHS: return - with ContextManager.cd(): - for job in workflow.jobs: - if not job.digest_config: - continue - for include_path in chain( - job.digest_config.include_paths, job.digest_config.exclude_paths - ): - if "*" in include_path: - assert glob.glob( - include_path, recursive=True - ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" - else: - assert ( - Path(include_path).is_file() or Path(include_path).is_dir() - ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + for job in workflow.jobs: + if not job.digest_config: + continue + for include_path in chain( + job.digest_config.include_paths, job.digest_config.exclude_paths + ): + if "*" in include_path: + assert glob.glob( + include_path, recursive=True + ), f"Apparently file glob [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. Setting to disable check: VALIDATE_FILE_PATHS" + else: + assert ( + Path(include_path).is_file() or Path(include_path).is_dir() + ), f"Apparently file path [{include_path}] in job [{job.name}] digest_config [{job.digest_config}] invalid, workflow [{workflow.name}]. 
Setting to disable check: VALIDATE_FILE_PATHS" @classmethod def validate_requirements_txt_files(cls, workflow: Workflow.Config) -> None: - with ContextManager.cd(): - for job in workflow.jobs: - if job.job_requirements: - if job.job_requirements.python_requirements_txt: - path = Path(job.job_requirements.python_requirements_txt) - message = f"File with py requirement [{path}] does not exist" - if job.name in ( - Settings.DOCKER_BUILD_JOB_NAME, - Settings.CI_CONFIG_JOB_NAME, - Settings.FINISH_WORKFLOW_JOB_NAME, - ): - message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' - message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" - message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" - message += ( - "\n echo requests==2.32.3 >> ./ci/requirements.txt" - ) - message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name + for job in workflow.jobs: + if job.job_requirements: + if job.job_requirements.python_requirements_txt: + path = Path(job.job_requirements.python_requirements_txt) + message = f"File with py requirement [{path}] does not exist" + if job.name in ( + Settings.DOCKER_BUILD_JOB_NAME, + Settings.CI_CONFIG_JOB_NAME, + Settings.FINISH_WORKFLOW_JOB_NAME, + ): + message += '\n If all requirements already installed on your runners - add setting INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS""' + message += "\n If requirements needs to be installed - add requirements file (Settings.INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS):" + message += "\n echo jwt==1.3.1 > ./ci/requirements.txt" + message += ( + "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) + message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" + cls.evaluate_check( + path.is_file(), message, job.name, workflow.name + ) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/praktika/yaml_generator.py b/ci/praktika/yaml_generator.py index 1422a835663..f56715755e8 100644 --- a/ci/praktika/yaml_generator.py +++ b/ci/praktika/yaml_generator.py @@ -81,8 +81,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 with: - clear-repository: true - ref: ${{{{ github.event.pull_reguest.head.sha }}}} + ref: ${{{{ github.head_ref }}}} {JOB_ADDONS} - name: Prepare env script run: | @@ -191,12 +190,10 @@ jobs: False ), f"Workflow event not yet supported [{workflow_config.event}]" - with ContextManager.cd(): - with open(self._get_workflow_file_name(workflow_config.name), "w") as f: - f.write(yaml_workflow_str) + with open(self._get_workflow_file_name(workflow_config.name), "w") as f: + f.write(yaml_workflow_str) - with ContextManager.cd(): - Shell.check("git add ./.github/workflows/*.yaml") + Shell.check("git add ./.github/workflows/*.yaml") class PullRequestPushYamlGen: diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 33173756924..99fec8b5402 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -8,7 +8,7 @@ class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" BUILDER = "builder" - STYLE_CHECKER = "style-checker" + FUNC_TESTER_AMD = "func-tester" BASE_BRANCH = "master" @@ -238,5 +238,4 @@ class JobNames: STYLE_CHECK = "Style Check" FAST_TEST = "Fast test" BUILD = "Build" - BUILD_AMD_DEBUG = "Build 
(amd, debug)" - STATELESS_TESTS = "Stateless tests (amd, debug)" + STATELESS = "Stateless tests" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 10dd77a0414..0d505ae27c4 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -41,8 +41,9 @@ fast_test_job = Job.Config( amd_build_jobs = Job.Config( name=JobNames.BUILD, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/build_clickhouse.py", + command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}", run_in_docker="clickhouse/fasttest", + timeout=3600 * 2, digest_config=Job.CacheDigestConfig( include_paths=[ "./src", @@ -55,6 +56,7 @@ amd_build_jobs = Job.Config( "./docker/packager/packager", "./rust", "./tests/ci/version_helper.py", + "./ci/jobs/build_clickhouse.py", ], ), ).parametrize( @@ -62,27 +64,53 @@ amd_build_jobs = Job.Config( provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], ) -statless_batch_num = 2 -stateless_tests_amd_debug_jobs = Job.Config( - name=JobNames.STATELESS_TESTS, +stateless_tests_jobs = Job.Config( + name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], - command="python3 ./ci/jobs/functional_stateless_tests.py amd_debug", - run_in_docker="clickhouse/stateless-test", + command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", ], ), - requires=[ArtifactNames.CH_AMD_DEBUG], ).parametrize( parameter=[ - f"parallel {i+1}/{statless_batch_num}" for i in range(statless_batch_num) - ] - + ["non-parallel"], - runs_on=[[RunnerLabels.BUILDER] for _ in range(statless_batch_num)] - + [[RunnerLabels.STYLE_CHECKER]], + "amd_debug,parallel", + "amd_debug,non-parallel", + "amd_release,parallel", + "amd_release,non-parallel", + ], + runs_on=[ + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + [RunnerLabels.BUILDER], + [RunnerLabels.FUNC_TESTER_AMD], + ], + requires=[ + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_RELEASE], + [ArtifactNames.CH_AMD_RELEASE], + ], ) +# stateless_tests_amd_release_jobs = Job.Config( +# name=JobNames.STATELESS_AMD_RELEASE, +# runs_on=[RunnerLabels.BUILDER], +# command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", +# run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", +# digest_config=Job.CacheDigestConfig( +# include_paths=[ +# "./ci/jobs/functional_stateless_tests.py", +# ], +# ), +# requires=[ArtifactNames.CH_AMD_RELEASE], +# ).parametrize( +# parameter=["parallel", "non-parallel"], +# runs_on=[[RunnerLabels.BUILDER], [RunnerLabels.FUNC_TESTER_AMD]], +# ) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -91,7 +119,7 @@ workflow = Workflow.Config( style_check_job, fast_test_job, *amd_build_jobs, - *stateless_tests_amd_debug_jobs, + *stateless_tests_jobs, ], artifacts=[ Artifact.Config( diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 3396b10814a..a0ec080ed75 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -2619,14 +2619,14 @@ def run_tests_process(*args, **kwargs): def do_run_tests(jobs, test_suite: TestSuite): - if jobs > 1 and len(test_suite.parallel_tests) > 0: - print( - "Found", - len(test_suite.parallel_tests), - "parallel tests and", - 
len(test_suite.sequential_tests), - "sequential tests", - ) + print( + "Found", + len(test_suite.parallel_tests), + "parallel tests and", + len(test_suite.sequential_tests), + "sequential tests", + ) + if test_suite.parallel_tests: tests_n = len(test_suite.parallel_tests) jobs = min(jobs, tests_n) @@ -2639,6 +2639,7 @@ def do_run_tests(jobs, test_suite: TestSuite): # It makes it more difficult to detect real flaky tests, # because the distribution and the amount # of failures will be nearly the same for all tests from the group. + # TODO: add shuffle for sequential tests random.shuffle(test_suite.parallel_tests) batch_size = len(test_suite.parallel_tests) // jobs @@ -2684,6 +2685,7 @@ def do_run_tests(jobs, test_suite: TestSuite): if not p.is_alive(): processes.remove(p) + if test_suite.sequential_tests: run_tests_array( ( test_suite.sequential_tests, @@ -2693,17 +2695,7 @@ def do_run_tests(jobs, test_suite: TestSuite): ) ) - return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) - num_tests = len(test_suite.all_tests) - run_tests_array( - ( - test_suite.all_tests, - num_tests, - test_suite, - False, - ) - ) - return num_tests + return len(test_suite.sequential_tests) + len(test_suite.parallel_tests) def is_test_from_dir(suite_dir, case): diff --git a/tests/config/install.sh b/tests/config/install.sh index cdae5741fce..9630977b9c1 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -200,7 +200,7 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then fi if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then - ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ + #ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/ diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 837c05a9c5d..88839c39674 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -4,8 +4,10 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} +TEST_DIR=${2:-/repo/tests/} if [ -d "$TEMP_DIR" ]; then + TEST_DIR=$(readlink -f $TEST_DIR) cd "$TEMP_DIR" # add / for minio mc in docker PATH="/:.:$PATH" @@ -79,7 +81,7 @@ start_minio() { pwd mkdir -p ./minio_data minio --version - minio server --address ":11111" ./minio_data & + nohup minio server --address ":11111" ./minio_data & wait_for_it lsof -i :11111 sleep 5 @@ -153,7 +155,7 @@ main() { fi start_minio setup_minio "$1" - upload_data "${query_dir}" "${2:-/repo/tests/}" + upload_data "${query_dir}" "$TEST_DIR" setup_aws_credentials } From a8d07555d4d01e1f261dd5c6c6f003a5581c2339 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 12:31:22 +0000 Subject: [PATCH 43/74] Update 02967_parallel_replicas_joins_and_analyzer EXPLAIN with RIGHT JOIN changed --- ...llel_replicas_joins_and_analyzer.reference | 99 +++++++------------ 1 file changed, 35 insertions(+), 64 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index 93003b6cf6d..1269f792e76 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference +++ 
b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -266,24 +266,13 @@ Expression Join Expression Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + ReadFromMemoryStorage + Expression + Expression + ReadFromMergeTree + Expression + ReadFromMemoryStorage -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -317,27 +306,19 @@ select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_paralle Expression Sorting Expression - Join - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Sorting Expression Join - Union - Expression + Expression + ReadFromMemoryStorage + Expression + Join Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union - Expression + Expression + ReadFromMergeTree Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromMergeTree -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -722,28 +703,22 @@ sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -Expression - Join - Expression - Join - Union +Union + Expression + Join + Expression + Join Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Union Expression Expression ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Expression + ReadFromRemoteParallelReplicas -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -776,28 +751,24 @@ sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll. 
select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting - Expression - Join - Union + Union + Expression + Sorting Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas - Expression - Join - Union + Join Expression Expression ReadFromMergeTree Expression - ReadFromRemoteParallelReplicas - Union - Expression - Expression - ReadFromMergeTree - Expression - ReadFromRemoteParallelReplicas + Join + Expression + Expression + ReadFromMergeTree + Expression + Expression + ReadFromMergeTree + Expression + ReadFromRemoteParallelReplicas -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), From 1561a0115fa740c746ccb054552de3ad751e12ae Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 14:30:02 +0000 Subject: [PATCH 44/74] Fix test, set min_bytes_to_use_direct_io expicitly --- tests/queries/0_stateless/03254_pr_join_on_dups.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/03254_pr_join_on_dups.sql b/tests/queries/0_stateless/03254_pr_join_on_dups.sql index 166910d496f..aca4fc6b6c3 100644 --- a/tests/queries/0_stateless/03254_pr_join_on_dups.sql +++ b/tests/queries/0_stateless/03254_pr_join_on_dups.sql @@ -1,6 +1,8 @@ drop table if exists X sync; drop table if exists Y sync; +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 + create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); @@ -10,7 +12,6 @@ insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; -set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 select 'inner'; select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; From 37c24838693e573428414016a619fa70de61823a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 8 Nov 2024 17:09:23 +0000 Subject: [PATCH 45/74] Do not randomize min_bytes_to_use_direct_io --- tests/clickhouse-test | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 9c035b7cc35..a1ffcc2030f 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -821,9 +821,10 @@ class SettingsRandomizer: "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), "use_uncompressed_cache": lambda: random.randint(0, 1), - "min_bytes_to_use_direct_io": threshold_generator( - 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 - ), + # see https://github.com/ClickHouse/ClickHouse/issues/65690 + # 
"min_bytes_to_use_direct_io": threshold_generator( + # 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + # ), "min_bytes_to_use_mmap_io": threshold_generator( 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 ), From 9baa5911f9183e1652593b5d362545377baeea2a Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sun, 10 Nov 2024 20:54:59 +0000 Subject: [PATCH 46/74] Debugging stack with PR queries --- src/Planner/findParallelReplicasQuery.cpp | 57 +++++++++++++++++------ 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 28e2dd8a0ea..fbe2993b7c6 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -23,6 +23,8 @@ #include #include +#include + namespace DB { namespace Setting @@ -38,12 +40,12 @@ namespace ErrorCodes /// Returns a list of (sub)queries (candidates) which may support parallel replicas. /// The rule is : -/// subquery has only LEFT or ALL INNER JOIN (or none), and left part is MergeTree table or subquery candidate as well. +/// subquery has only LEFT / RIGHT / ALL INNER JOIN (or none), and left / right part is MergeTree table or subquery candidate as well. /// /// Additional checks are required, so we return many candidates. The innermost subquery is on top. -std::stack getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) +std::vector getSupportingParallelReplicasQuery(const IQueryTreeNode * query_tree_node) { - std::stack res; + std::vector res; while (query_tree_node) { @@ -75,7 +77,7 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre { const auto & query_node_to_process = query_tree_node->as(); query_tree_node = query_node_to_process.getJoinTree().get(); - res.push(&query_node_to_process); + res.push_back(&query_node_to_process); break; } case QueryTreeNodeType::UNION: @@ -162,14 +164,25 @@ QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const Cont return query->cloneAndReplace(visitor.replacement_map); } +static void dumpStack(const std::vector & stack) +{ + std::ranges::reverse_view rv{stack}; + for (const auto * node : rv) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}\n{}", CityHash_v1_0_2::Hash128to64(node->getTreeHash()), node->dumpTree()); +} + /// Find the best candidate for parallel replicas execution by verifying query plan. -/// If query plan has only Expression, Filter of Join steps, we can execute it fully remotely and check the next query. +/// If query plan has only Expression, Filter or Join steps, we can execute it fully remotely and check the next query. /// Otherwise we can execute current query up to WithMergableStage only. const QueryNode * findQueryForParallelReplicas( - std::stack stack, + std::vector stack, const std::unordered_map & mapping, const Settings & settings) { + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}", StackTrace().toString()); + + dumpStack(stack); + struct Frame { const QueryPlan::Node * node = nullptr; @@ -188,14 +201,20 @@ const QueryNode * findQueryForParallelReplicas( while (!stack.empty()) { - const QueryNode * const subquery_node = stack.top(); - stack.pop(); + const QueryNode * const subquery_node = stack.back(); + stack.pop_back(); auto it = mapping.find(subquery_node); /// This should not happen ideally. 
if (it == mapping.end()) break; + LOG_DEBUG( + getLogger(__PRETTY_FUNCTION__), + "{} : {}", + CityHash_v1_0_2::Hash128to64(it->first->getTreeHash()), + it->second->step->getName()); + std::stack nodes_to_check; nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; @@ -208,6 +227,8 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} childrens={}", step->getName(), children.size()); + if (children.empty()) { /// Found a source step. @@ -235,7 +256,7 @@ const QueryNode * findQueryForParallelReplicas( else { const auto * join = typeid_cast(step); - /// We've checked that JOIN is INNER/LEFT in query tree. + /// We've checked that JOIN is INNER/LEFT/RIGHT on query tree level before. /// Don't distribute UNION node. if (!join) return res; @@ -286,7 +307,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr return nullptr; /// We don't have any subquery and storage can process parallel replicas by itself. - if (stack.top() == query_tree_node.get()) + if (stack.back() == query_tree_node.get()) return nullptr; /// This is needed to avoid infinite recursion. @@ -309,18 +330,24 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr const auto & mapping = planner.getQueryNodeToPlanStepMapping(); const auto * res = findQueryForParallelReplicas(new_stack, mapping, context->getSettingsRef()); - /// Now, return a query from initial stack. if (res) { + // find query in initial stack while (!new_stack.empty()) { - if (res == new_stack.top()) - return stack.top(); + if (res == new_stack.back()) + { + res = stack.back(); + break; + } - stack.pop(); - new_stack.pop(); + stack.pop_back(); + new_stack.pop_back(); } } + + if (res) + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Choosen query: {}", res->dumpTree()); return res; } From 39e01d47b1892b2049d18fc19803949d1bfcda51 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 11 Nov 2024 11:54:04 +0000 Subject: [PATCH 47/74] Fix style check --- src/Planner/findParallelReplicasQuery.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index fbe2993b7c6..494326c0ed0 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -227,7 +227,7 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} childrens={}", step->getName(), children.size()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} children={}", step->getName(), children.size()); if (children.empty()) { @@ -347,7 +347,7 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr } if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Choosen query: {}", res->dumpTree()); + LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Chosen query: {}", res->dumpTree()); return res; } From 06debdc479bab58f2d1d7fd4b3764e65a8c9fa01 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 8 Nov 2024 17:48:17 +0100 Subject: [PATCH 48/74] result with versioning --- ci/docker/stateless-test/Dockerfile | 6 +- ci/jobs/build_clickhouse.py | 1 - ci/jobs/functional_stateless_tests.py | 11 +- ci/jobs/scripts/clickhouse_proc.py | 11 + .../setup_hdfs_minicluster.sh 
| 19 ++ ci/praktika/__main__.py | 7 + ci/praktika/_environment.py | 14 +- ci/praktika/_settings.py | 128 ---------- ci/praktika/digest.py | 38 +-- ci/praktika/hook_cache.py | 11 +- ci/praktika/hook_html.py | 71 ++---- ci/praktika/json.html | 11 +- ci/praktika/mangle.py | 36 +-- ci/praktika/native_jobs.py | 14 +- ci/praktika/result.py | 240 +++++++++++++++++- ci/praktika/runner.py | 18 +- ci/praktika/runtime.py | 6 + ci/praktika/s3.py | 172 ++----------- ci/praktika/settings.py | 156 +++++++++++- ci/praktika/utils.py | 2 - ci/praktika/validator.py | 8 +- ci/workflows/pull_request.py | 1 + 22 files changed, 551 insertions(+), 430 deletions(-) create mode 100755 ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh delete mode 100644 ci/praktika/_settings.py diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index 4abd8204f1d..760fceeebbf 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -100,8 +100,12 @@ ENV PATH="/wd/tests:/tmp/praktika/input:$PATH" RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \ && tar -xvf hadoop-3.3.1.tar.gz \ - && rm -rf hadoop-3.3.1.tar.gz + && rm -rf hadoop-3.3.1.tar.gz \ + && chmod 777 /hadoop-3.3.1 RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node + +RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse +USER clickhouse \ No newline at end of file diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 1e6d2c648a7..3bdc23d383c 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -127,7 +127,6 @@ def main(): Shell.check(f"ls -l {build_dir}/programs/") res = results[-1].is_ok() - Result.create_from(results=results, stopwatch=stop_watch).complete_job() diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index 0481086d80a..390a6336b45 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -27,11 +27,12 @@ def parse_args(): default="", ) parser.add_argument("--param", help="Optional job start stage", default=None) + parser.add_argument("--test", help="Optional test name pattern", default="") return parser.parse_args() def run_stateless_test( - no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int + no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test="" ): assert not (no_parallel and no_sequiential) test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt" @@ -43,7 +44,7 @@ def run_stateless_test( --no-drop-if-fail --capture-client-stacktrace --queries /repo/tests/queries --test-runs 1 --hung-check \ {'--no-parallel' if no_parallel else ''} {'--no-sequential' if no_sequiential else ''} \ --print-time --jobs {nproc} --report-coverage --report-logs-stats {aux} \ - --queries ./tests/queries -- '' | ts '%Y-%m-%d %H:%M:%S' \ + --queries ./tests/queries -- '{test}' | ts '%Y-%m-%d %H:%M:%S' \ | tee -a \"{test_output_file}\"" if Path(test_output_file).exists(): Path(test_output_file).unlink() @@ -119,11 +120,14 @@ def main(): stop_watch_ = Utils.Stopwatch() step_name = "Start ClickHouse Server" print(step_name) + hdfs_log = "/tmp/praktika/output/hdfs_mini.log" minio_log = "/tmp/praktika/output/minio.log" + res = res and CH.start_hdfs(log_file_path=hdfs_log) res = res and CH.start_minio(log_file_path=minio_log) - logs_to_attach += [minio_log] + logs_to_attach += [minio_log, hdfs_log] 
time.sleep(10) Shell.check("ps -ef | grep minio", verbose=True) + Shell.check("ps -ef | grep hdfs", verbose=True) res = res and Shell.check( "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True ) @@ -153,6 +157,7 @@ def main(): no_sequiential=no_sequential, batch_num=batch_num, batch_total=total_batches, + test=args.test, ) results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run()) results[-1].set_timing(stopwatch=stop_watch_) diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py index c43283e75e0..8f9bef57083 100644 --- a/ci/jobs/scripts/clickhouse_proc.py +++ b/ci/jobs/scripts/clickhouse_proc.py @@ -44,6 +44,17 @@ class ClickHouseProc: self.minio_proc = None + def start_hdfs(self, log_file_path): + command = ["./ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh"] + with open(log_file_path, "w") as log_file: + process = subprocess.Popen( + command, stdout=log_file, stderr=subprocess.STDOUT + ) + print( + f"Started setup_hdfs_minicluster.sh asynchronously with PID {process.pid}" + ) + return True + def start_minio(self, log_file_path): command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"] with open(log_file_path, "w") as log_file: diff --git a/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh new file mode 100755 index 00000000000..b810b27fe2b --- /dev/null +++ b/ci/jobs/scripts/functional_tests/setup_hdfs_minicluster.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# shellcheck disable=SC2024 + +set -e -x -a -u + +ls -lha + +cd /hadoop-3.3.1 + +export JAVA_HOME=/usr +mkdir -p target/test/data + +bin/mapred minicluster -format -nomr -nnport 12222 & + +while ! nc -z localhost 12222; do + sleep 1 +done + +lsof -i :12222 diff --git a/ci/praktika/__main__.py b/ci/praktika/__main__.py index fbb9f92909a..3dfdc26d69d 100644 --- a/ci/praktika/__main__.py +++ b/ci/praktika/__main__.py @@ -37,6 +37,12 @@ def create_parser(): type=str, default=None, ) + run_parser.add_argument( + "--test", + help="Custom parameter to pass into a job script, it's up to job script how to use it, for local test", + type=str, + default="", + ) run_parser.add_argument( "--pr", help="PR number. Optional parameter for local run. 
Set if you want an required artifact to be uploaded from CI run in that PR", @@ -106,6 +112,7 @@ if __name__ == "__main__": local_run=not args.ci, no_docker=args.no_docker, param=args.param, + test=args.test, pr=args.pr, branch=args.branch, sha=args.sha, diff --git a/ci/praktika/_environment.py b/ci/praktika/_environment.py index 1c6b547ddde..734a4be3176 100644 --- a/ci/praktika/_environment.py +++ b/ci/praktika/_environment.py @@ -6,7 +6,7 @@ from types import SimpleNamespace from typing import Any, Dict, List, Type from praktika import Workflow -from praktika._settings import _Settings +from praktika.settings import Settings from praktika.utils import MetaClasses, T @@ -35,7 +35,7 @@ class _Environment(MetaClasses.Serializable): @classmethod def file_name_static(cls, _name=""): - return f"{_Settings.TEMP_DIR}/{cls.name}.json" + return f"{Settings.TEMP_DIR}/{cls.name}.json" @classmethod def from_dict(cls: Type[T], obj: Dict[str, Any]) -> T: @@ -66,12 +66,12 @@ class _Environment(MetaClasses.Serializable): @staticmethod def get_needs_statuses(): - if Path(_Settings.WORKFLOW_STATUS_FILE).is_file(): - with open(_Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: + if Path(Settings.WORKFLOW_STATUS_FILE).is_file(): + with open(Settings.WORKFLOW_STATUS_FILE, "r", encoding="utf8") as f: return json.load(f) else: print( - f"ERROR: Status file [{_Settings.WORKFLOW_STATUS_FILE}] does not exist" + f"ERROR: Status file [{Settings.WORKFLOW_STATUS_FILE}] does not exist" ) raise RuntimeError() @@ -171,7 +171,7 @@ class _Environment(MetaClasses.Serializable): # TODO: find a better place for the function. This file should not import praktika.settings # as it's requires reading users config, that's why imports nested inside the function - def get_report_url(self, settings): + def get_report_url(self, settings, latest=False): import urllib path = settings.HTML_S3_PATH @@ -179,7 +179,7 @@ class _Environment(MetaClasses.Serializable): if bucket in path: path = path.replace(bucket, endpoint) break - REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" + REPORT_URL = f"https://{path}/{Path(settings.HTML_PAGE_FILE).name}?PR={self.PR_NUMBER}&sha={'latest' if latest else self.SHA}&name_0={urllib.parse.quote(self.WORKFLOW_NAME, safe='')}&name_1={urllib.parse.quote(self.JOB_NAME, safe='')}" return REPORT_URL def is_local_run(self): diff --git a/ci/praktika/_settings.py b/ci/praktika/_settings.py deleted file mode 100644 index 17da1519e37..00000000000 --- a/ci/praktika/_settings.py +++ /dev/null @@ -1,128 +0,0 @@ -import dataclasses -from typing import Dict, Iterable, List, Optional - - -@dataclasses.dataclass -class _Settings: - ###################################### - # Pipeline generation settings # - ###################################### - MAIN_BRANCH = "main" - CI_PATH = "./ci" - WORKFLOW_PATH_PREFIX: str = "./.github/workflows" - WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" - SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" - CI_CONFIG_JOB_NAME = "Config Workflow" - DOCKER_BUILD_JOB_NAME = "Docker Builds" - FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" - READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" - CI_CONFIG_RUNS_ON: Optional[List[str]] = None - DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None - VALIDATE_FILE_PATHS: bool = True - - ###################################### - # Runtime Settings # - ###################################### - 
MAX_RETRIES_S3 = 3 - MAX_RETRIES_GH = 3 - - ###################################### - # S3 (artifact storage) settings # - ###################################### - S3_ARTIFACT_PATH: str = "" - - ###################################### - # CI workspace settings # - ###################################### - TEMP_DIR: str = "/tmp/praktika" - OUTPUT_DIR: str = f"{TEMP_DIR}/output" - INPUT_DIR: str = f"{TEMP_DIR}/input" - PYTHON_INTERPRETER: str = "python3" - PYTHON_PACKET_MANAGER: str = "pip3" - PYTHON_VERSION: str = "3.9" - INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False - INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" - ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" - RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" - - SECRET_GH_APP_ID: str = "GH_APP_ID" - SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" - - ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" - WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" - - ###################################### - # CI Cache settings # - ###################################### - CACHE_VERSION: int = 1 - CACHE_DIGEST_LEN: int = 20 - CACHE_S3_PATH: str = "" - CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" - - ###################################### - # Report settings # - ###################################### - HTML_S3_PATH: str = "" - HTML_PAGE_FILE: str = "./praktika/json.html" - TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) - S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None - - DOCKERHUB_USERNAME: str = "" - DOCKERHUB_SECRET: str = "" - DOCKER_WD: str = "/wd" - - ###################################### - # CI DB Settings # - ###################################### - SECRET_CI_DB_URL: str = "CI_DB_URL" - SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" - CI_DB_DB_NAME = "" - CI_DB_TABLE_NAME = "" - CI_DB_INSERT_TIMEOUT_SEC = 5 - - DISABLE_MERGE_COMMIT = True - - -_USER_DEFINED_SETTINGS = [ - "S3_ARTIFACT_PATH", - "CACHE_S3_PATH", - "HTML_S3_PATH", - "S3_BUCKET_TO_HTTP_ENDPOINT", - "TEXT_CONTENT_EXTENSIONS", - "TEMP_DIR", - "OUTPUT_DIR", - "INPUT_DIR", - "CI_CONFIG_RUNS_ON", - "DOCKER_BUILD_RUNS_ON", - "CI_CONFIG_JOB_NAME", - "PYTHON_INTERPRETER", - "PYTHON_VERSION", - "PYTHON_PACKET_MANAGER", - "INSTALL_PYTHON_FOR_NATIVE_JOBS", - "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", - "MAX_RETRIES_S3", - "MAX_RETRIES_GH", - "VALIDATE_FILE_PATHS", - "DOCKERHUB_USERNAME", - "DOCKERHUB_SECRET", - "READY_FOR_MERGE_STATUS_NAME", - "SECRET_CI_DB_URL", - "SECRET_CI_DB_PASSWORD", - "CI_DB_DB_NAME", - "CI_DB_TABLE_NAME", - "CI_DB_INSERT_TIMEOUT_SEC", - "SECRET_GH_APP_PEM_KEY", - "SECRET_GH_APP_ID", - "MAIN_BRANCH", - "DISABLE_MERGE_COMMIT", -] - - -class GHRunners: - ubuntu = "ubuntu-latest" - - -if __name__ == "__main__": - for setting in _USER_DEFINED_SETTINGS: - print(_Settings().__getattribute__(setting)) - # print(dataclasses.asdict(_Settings())) diff --git a/ci/praktika/digest.py b/ci/praktika/digest.py index a1f2eecf9b6..6b7e5eec07b 100644 --- a/ci/praktika/digest.py +++ b/ci/praktika/digest.py @@ -23,7 +23,7 @@ class Digest: hash_string = hash_obj.hexdigest() return hash_string - def calc_job_digest(self, job_config: Job.Config): + def calc_job_digest(self, job_config: Job.Config, docker_digests): config = job_config.digest_config if not config: return "f" * Settings.CACHE_DIGEST_LEN @@ -34,28 +34,28 @@ class Digest: print( f"calc digest for job [{job_config.name}]: hash_key [{cache_key}] - from cache" ) - return self.digest_cache[cache_key] - - included_files = Utils.traverse_paths( - 
job_config.digest_config.include_paths, - job_config.digest_config.exclude_paths, - sorted=True, - ) - print( - f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" - ) - - # Calculate MD5 hash - res = "" - if not included_files: - res = "f" * Settings.CACHE_DIGEST_LEN - print(f"NOTE: empty digest config [{config}] - return dummy digest") + digest = self.digest_cache[cache_key] else: + included_files = Utils.traverse_paths( + job_config.digest_config.include_paths, + job_config.digest_config.exclude_paths, + sorted=True, + ) + print( + f"calc digest for job [{job_config.name}]: hash_key [{cache_key}], include [{len(included_files)}] files" + ) + hash_md5 = hashlib.md5() for i, file_path in enumerate(included_files): hash_md5 = self._calc_file_digest(file_path, hash_md5) - digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] - self.digest_cache[cache_key] = digest + digest = hash_md5.hexdigest()[: Settings.CACHE_DIGEST_LEN] + self.digest_cache[cache_key] = digest + + if job_config.run_in_docker: + # respect docker digest in the job digest + docker_digest = docker_digests[job_config.run_in_docker.split("+")[0]] + digest = "-".join([docker_digest, digest]) + return digest def calc_docker_digest( diff --git a/ci/praktika/hook_cache.py b/ci/praktika/hook_cache.py index 5cfedec0144..e001e936a71 100644 --- a/ci/praktika/hook_cache.py +++ b/ci/praktika/hook_cache.py @@ -1,6 +1,5 @@ from praktika._environment import _Environment from praktika.cache import Cache -from praktika.mangle import _get_workflows from praktika.runtime import RunConfig from praktika.settings import Settings from praktika.utils import Utils @@ -10,6 +9,7 @@ class CacheRunnerHooks: @classmethod def configure(cls, workflow): workflow_config = RunConfig.from_fs(workflow.name) + docker_digests = workflow_config.digest_dockers cache = Cache() print(f"Workflow Configure, workflow [{workflow.name}]") assert ( @@ -18,11 +18,13 @@ class CacheRunnerHooks: artifact_digest_map = {} job_digest_map = {} for job in workflow.jobs: + digest = cache.digest.calc_job_digest( + job_config=job, docker_digests=docker_digests + ) if not job.digest_config: print( f"NOTE: job [{job.name}] has no Config.digest_config - skip cache check, always run" ) - digest = cache.digest.calc_job_digest(job_config=job) job_digest_map[job.name] = digest if job.provides: # assign the job digest also to the artifacts it provides @@ -48,7 +50,6 @@ class CacheRunnerHooks: ), f"BUG, Workflow with enabled cache must have job digests after configuration, wf [{workflow.name}]" print("Check remote cache") - job_to_cache_record = {} for job_name, job_digest in workflow_config.digest_jobs.items(): record = cache.fetch_success(job_name=job_name, job_digest=job_digest) if record: @@ -58,7 +59,7 @@ class CacheRunnerHooks: ) workflow_config.cache_success.append(job_name) workflow_config.cache_success_base64.append(Utils.to_base64(job_name)) - job_to_cache_record[job_name] = record + workflow_config.cache_jobs[job_name] = record print("Check artifacts to reuse") for job in workflow.jobs: @@ -66,7 +67,7 @@ class CacheRunnerHooks: if job.provides: for artifact_name in job.provides: workflow_config.cache_artifacts[artifact_name] = ( - job_to_cache_record[job.name] + workflow_config.cache_jobs[job.name] ) print(f"Write config to GH's job output") diff --git a/ci/praktika/hook_html.py b/ci/praktika/hook_html.py index ca2692d1b22..e2faefb2fa9 100644 --- a/ci/praktika/hook_html.py +++ b/ci/praktika/hook_html.py @@ -6,7 +6,7 @@ from 
typing import List from praktika._environment import _Environment from praktika.gh import GH from praktika.parser import WorkflowConfigParser -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig from praktika.s3 import S3 from praktika.settings import Settings @@ -119,6 +119,7 @@ class HtmlRunnerHooks: # generate pending Results for all jobs in the workflow if _workflow.enable_cache: skip_jobs = RunConfig.from_fs(_workflow.name).cache_success + job_cache_records = RunConfig.from_fs(_workflow.name).cache_jobs else: skip_jobs = [] @@ -128,21 +129,14 @@ class HtmlRunnerHooks: if job.name not in skip_jobs: result = Result.generate_pending(job.name) else: - result = Result.generate_skipped(job.name) + result = Result.generate_skipped(job.name, job_cache_records[job.name]) results.append(result) summary_result = Result.generate_pending(_workflow.name, results=results) summary_result.links.append(env.CHANGE_URL) summary_result.links.append(env.RUN_URL) summary_result.start_time = Utils.timestamp() - # clean the previous latest results in PR if any - if env.PR_NUMBER: - S3.clean_latest_result() - S3.copy_result_to_s3( - summary_result, - unlock=False, - ) - + assert _ResultS3.copy_result_to_s3_with_version(summary_result, version=0) page_url = env.get_report_url(settings=Settings) print(f"CI Status page url [{page_url}]") @@ -150,7 +144,7 @@ class HtmlRunnerHooks: name=_workflow.name, status=Result.Status.PENDING, description="", - url=env.get_report_url(settings=Settings), + url=env.get_report_url(settings=Settings, latest=True), ) res2 = GH.post_pr_comment( comment_body=f"Workflow [[{_workflow.name}]({page_url})], commit [{_Environment.get().SHA[:8]}]", @@ -167,14 +161,8 @@ class HtmlRunnerHooks: @classmethod def pre_run(cls, _workflow, _job): result = Result.from_fs(_job.name) - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - ) - workflow_result = Result.from_fs(_workflow.name) - workflow_result.update_sub_result(result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + _ResultS3.update_workflow_results( + workflow_name=_workflow.name, new_sub_results=result ) @classmethod @@ -184,14 +172,13 @@ class HtmlRunnerHooks: @classmethod def post_run(cls, _workflow, _job, info_errors): result = Result.from_fs(_job.name) - env = _Environment.get() - S3.copy_result_from_s3( - Result.file_name_static(_workflow.name), - lock=True, - ) - workflow_result = Result.from_fs(_workflow.name) - print(f"Workflow info [{workflow_result.info}], info_errors [{info_errors}]") + _ResultS3.upload_result_files_to_s3(result) + _ResultS3.copy_result_to_s3(result) + env = _Environment.get() + + new_sub_results = [result] + new_result_info = "" env_info = env.REPORT_INFO if env_info: print( @@ -203,14 +190,8 @@ class HtmlRunnerHooks: info_str = f"{_job.name}:\n" info_str += "\n".join(info_errors) print("Update workflow results with new info") - workflow_result.set_info(info_str) + new_result_info = info_str - old_status = workflow_result.status - - S3.upload_result_files_to_s3(result) - workflow_result.update_sub_result(result) - - skipped_job_results = [] if not result.is_ok(): print( "Current job failed - find dependee jobs in the workflow and set their statuses to skipped" @@ -223,7 +204,7 @@ class HtmlRunnerHooks: print( f"NOTE: Set job [{dependee_job.name}] status to [{Result.Status.SKIPPED}] due to current failure" ) - skipped_job_results.append( + new_sub_results.append( Result( 
name=dependee_job.name, status=Result.Status.SKIPPED, @@ -231,20 +212,18 @@ class HtmlRunnerHooks: + f" [{_job.name}]", ) ) - for skipped_job_result in skipped_job_results: - workflow_result.update_sub_result(skipped_job_result) - S3.copy_result_to_s3( - workflow_result, - unlock=True, + updated_status = _ResultS3.update_workflow_results( + new_info=new_result_info, + new_sub_results=new_sub_results, + workflow_name=_workflow.name, ) - if workflow_result.status != old_status: - print( - f"Update GH commit status [{result.name}]: [{old_status} -> {workflow_result.status}]" - ) + + if updated_status: + print(f"Update GH commit status [{result.name}]: [{updated_status}]") GH.post_commit_status( - name=workflow_result.name, - status=GH.convert_to_gh_status(workflow_result.status), + name=_workflow.name, + status=GH.convert_to_gh_status(updated_status), description="", - url=env.get_report_url(settings=Settings), + url=env.get_report_url(settings=Settings, latest=True), ) diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 4e15a67ba76..544fd6e68d4 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -342,7 +342,7 @@ const milliseconds = Math.floor((duration % 1) * 1000); const formattedSeconds = String(seconds); - const formattedMilliseconds = String(milliseconds).padStart(3, '0'); + const formattedMilliseconds = String(milliseconds).padStart(2, '0').slice(-2); return `${formattedSeconds}.${formattedMilliseconds}`; } @@ -600,8 +600,7 @@ td.classList.add('time-column'); td.textContent = value ? formatDuration(value) : ''; } else if (column === 'info') { - // For info and other columns, just display the value - td.textContent = value || ''; + td.textContent = value.includes('\n') ? '↵' : (value || ''); td.classList.add('info-column'); } @@ -675,7 +674,8 @@ } if (targetData) { - infoElement.style.display = 'none'; + //infoElement.style.display = 'none'; + infoElement.innerHTML = (targetData.info || '').replace(/\n/g, '
'); addStatusToStatus(targetData.status, targetData.start_time, targetData.duration) @@ -804,7 +804,8 @@ // Check if all required parameters are present to load JSON if (PR && sha && root_name) { - loadResultsJSON(PR, sha, nameParams); + const shaToLoad = (sha === 'latest') ? commitsArray[commitsArray.length - 1] : sha; + loadResultsJSON(PR, shaToLoad, nameParams); } else { document.getElementById('title').textContent = 'Error: Missing required URL parameters: PR, sha, or name_0'; } diff --git a/ci/praktika/mangle.py b/ci/praktika/mangle.py index b16d52fbbbf..f94b11adad5 100644 --- a/ci/praktika/mangle.py +++ b/ci/praktika/mangle.py @@ -1,11 +1,10 @@ import copy import importlib.util from pathlib import Path -from typing import Any, Dict from praktika import Job -from praktika._settings import _USER_DEFINED_SETTINGS, _Settings -from praktika.utils import ContextManager, Utils +from praktika.settings import Settings +from praktika.utils import Utils def _get_workflows(name=None, file=None): @@ -14,13 +13,13 @@ def _get_workflows(name=None, file=None): """ res = [] - directory = Path(_Settings.WORKFLOWS_DIRECTORY) + directory = Path(Settings.WORKFLOWS_DIRECTORY) for py_file in directory.glob("*.py"): if file and file not in str(py_file): continue module_name = py_file.name.removeprefix(".py") spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.WORKFLOWS_DIRECTORY}/{module_name}" + module_name, f"{Settings.WORKFLOWS_DIRECTORY}/{module_name}" ) assert spec foo = importlib.util.module_from_spec(spec) @@ -106,30 +105,3 @@ def _update_workflow_with_native_jobs(workflow): for job in workflow.jobs: aux_job.requires.append(job.name) workflow.jobs.append(aux_job) - - -def _get_user_settings() -> Dict[str, Any]: - """ - Gets user's settings - """ - res = {} # type: Dict[str, Any] - - directory = Path(_Settings.SETTINGS_DIRECTORY) - for py_file in directory.glob("*.py"): - module_name = py_file.name.removeprefix(".py") - spec = importlib.util.spec_from_file_location( - module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" - ) - assert spec - foo = importlib.util.module_from_spec(spec) - assert spec.loader - spec.loader.exec_module(foo) - for setting in _USER_DEFINED_SETTINGS: - try: - value = getattr(foo, setting) - res[setting] = value - print(f"Apply user defined setting [{setting} = {value}]") - except Exception as e: - pass - - return res diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 58af211988b..52bf6c6e204 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -10,9 +10,8 @@ from praktika.gh import GH from praktika.hook_cache import CacheRunnerHooks from praktika.hook_html import HtmlRunnerHooks from praktika.mangle import _get_workflows -from praktika.result import Result, ResultInfo +from praktika.result import Result, ResultInfo, _ResultS3 from praktika.runtime import RunConfig -from praktika.s3 import S3 from praktika.settings import Settings from praktika.utils import Shell, Utils @@ -225,6 +224,7 @@ def _config_workflow(workflow: Workflow.Config, job_name): cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ).dump() # checks: @@ -310,9 +310,8 @@ def _finish_workflow(workflow, job_name): print(env.get_needs_statuses()) print("Check Workflow results") - S3.copy_result_from_s3( + _ResultS3.copy_result_from_s3( Result.file_name_static(workflow.name), - lock=False, ) workflow_result = Result.from_fs(workflow.name) @@ -345,7 +344,7 @@ def _finish_workflow(workflow, job_name): 
failed_results.append(result.name) if failed_results: - ready_for_merge_description = f"failed: {', '.join(failed_results)}" + ready_for_merge_description = f"Failed: {', '.join(failed_results)}" if not GH.post_commit_status( name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]", @@ -357,10 +356,9 @@ def _finish_workflow(workflow, job_name): env.add_info(ResultInfo.GH_STATUS_ERROR) if update_final_report: - S3.copy_result_to_s3( + _ResultS3.copy_result_to_s3( workflow_result, - unlock=False, - ) # no lock - no unlock + ) Result.from_fs(job_name).set_status(Result.Status.SUCCESS) diff --git a/ci/praktika/result.py b/ci/praktika/result.py index 842deacbcbd..8164b1d1295 100644 --- a/ci/praktika/result.py +++ b/ci/praktika/result.py @@ -2,10 +2,12 @@ import dataclasses import datetime import sys from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from praktika._environment import _Environment -from praktika._settings import _Settings +from praktika.cache import Cache +from praktika.s3 import S3 +from praktika.settings import Settings from praktika.utils import ContextManager, MetaClasses, Shell, Utils @@ -55,7 +57,7 @@ class Result(MetaClasses.Serializable): stopwatch: Utils.Stopwatch = None, status="", files=None, - info="", + info: Union[List[str], str] = "", with_info_from_results=True, ): if isinstance(status, bool): @@ -149,7 +151,7 @@ class Result(MetaClasses.Serializable): @classmethod def file_name_static(cls, name): - return f"{_Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" + return f"{Settings.TEMP_DIR}/result_{Utils.normalize_string(name)}.json" @classmethod def from_dict(cls, obj: Dict[str, Any]) -> "Result": @@ -232,7 +234,7 @@ class Result(MetaClasses.Serializable): ) @classmethod - def generate_skipped(cls, name, results=None): + def generate_skipped(cls, name, cache_record: Cache.CacheRecord, results=None): return Result( name=name, status=Result.Status.SKIPPED, @@ -241,7 +243,7 @@ class Result(MetaClasses.Serializable): results=results or [], files=[], links=[], - info="from cache", + info=f"from cache: sha [{cache_record.sha}], pr/branch [{cache_record.pr_number or cache_record.branch}]", ) @classmethod @@ -275,7 +277,7 @@ class Result(MetaClasses.Serializable): # Set log file path if logging is enabled log_file = ( - f"{_Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" + f"{Settings.TEMP_DIR}/{Utils.normalize_string(name)}.log" if with_log else None ) @@ -321,14 +323,31 @@ class Result(MetaClasses.Serializable): self.dump() if not self.is_ok(): print("ERROR: Job Failed") - for result in self.results: - if not result.is_ok(): - print("Failed checks:") - print(" | ", result) + print(self.to_stdout_formatted()) sys.exit(1) else: print("ok") + def to_stdout_formatted(self, indent="", res=""): + if self.is_ok(): + return res + + res += f"{indent}Task [{self.name}] failed.\n" + fail_info = "" + sub_indent = indent + " " + + if not self.results: + if not self.is_ok(): + fail_info += f"{sub_indent}{self.name}:\n" + for line in self.info.splitlines(): + fail_info += f"{sub_indent}{sub_indent}{line}\n" + return res + fail_info + + for sub_result in self.results: + res = sub_result.to_stdout_formatted(sub_indent, res) + + return res + class ResultInfo: SETUP_ENV_JOB_FAILED = ( @@ -351,3 +370,202 @@ class ResultInfo: ) S3_ERROR = "S3 call failure" + + +class _ResultS3: + + @classmethod + def copy_result_to_s3(cls, result, unlock=False): + result.dump() + env = _Environment.get() + 
s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" + s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" + url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) + # if unlock: + # if not cls.unlock(s3_path_full): + # print(f"ERROR: File [{s3_path_full}] unlock failure") + # assert False # TODO: investigate + return url + + @classmethod + def copy_result_from_s3(cls, local_path, lock=False): + env = _Environment.get() + file_name = Path(local_path).name + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" + # if lock: + # cls.lock(s3_path) + if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + + @classmethod + def copy_result_from_s3_with_version(cls, local_path): + env = _Environment.get() + file_name = Path(local_path).name + local_dir = Path(local_path).parent + file_name_pattern = f"{file_name}_*" + for file_path in local_dir.glob(file_name_pattern): + file_path.unlink() + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if not S3.copy_file_from_s3_matching_pattern( + s3_path=s3_path, local_path=local_dir, include=file_name_pattern + ): + print(f"ERROR: failed to cp file [{s3_path}] from s3") + raise + result_files = [] + for file_path in local_dir.glob(file_name_pattern): + result_files.append(file_path) + assert result_files, "No result files found" + result_files.sort() + version = int(result_files[-1].name.split("_")[-1]) + Shell.check(f"cp {result_files[-1]} {local_path}", strict=True, verbose=True) + return version + + @classmethod + def copy_result_to_s3_with_version(cls, result, version): + result.dump() + filename = Path(result.file_name()).name + file_name_versioned = f"{filename}_{str(version).zfill(3)}" + env = _Environment.get() + s3_path_versioned = ( + f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name_versioned}" + ) + s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/" + if version == 0: + S3.clean_s3_directory(s3_path=s3_path) + if not S3.put( + s3_path=s3_path_versioned, + local_path=result.file_name(), + if_none_matched=True, + ): + print("Failed to put versioned Result") + return False + if not S3.put(s3_path=s3_path, local_path=result.file_name()): + print("Failed to put non-versioned Result") + return True + + # @classmethod + # def lock(cls, s3_path, level=0): + # env = _Environment.get() + # s3_path_lock = s3_path + f".lock" + # file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" + # assert Shell.check( + # f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True + # ), "Never" + # + # i = 20 + # meta = S3.head_object(s3_path_lock) + # while meta: + # locked_by_job = meta.get("Metadata", {"job": ""}).get("job", "") + # if locked_by_job: + # decoded_bytes = base64.b64decode(locked_by_job) + # locked_by_job = decoded_bytes.decode("utf-8") + # print( + # f"WARNING: Failed to acquire lock, meta [{meta}], job [{locked_by_job}] - wait" + # ) + # i -= 5 + # if i < 0: + # info = f"ERROR: lock acquire failure - unlock forcefully" + # print(info) + # env.add_info(info) + # break + # time.sleep(5) + # + # metadata = {"job": Utils.to_base64(env.JOB_NAME)} + # S3.put( + # s3_path=s3_path_lock, + # local_path=file_path_lock, + # metadata=metadata, + # if_none_matched=True, + # ) + # time.sleep(1) + # obj = S3.head_object(s3_path_lock) + # if not obj or not obj.has_tags(tags=metadata): + # print(f"WARNING: locked by another job [{obj}]") + # env.add_info("S3 lock file failure") + # 
cls.lock(s3_path, level=level + 1) + # print("INFO: lock acquired") + # + # @classmethod + # def unlock(cls, s3_path): + # s3_path_lock = s3_path + ".lock" + # env = _Environment.get() + # obj = S3.head_object(s3_path_lock) + # if not obj: + # print("ERROR: lock file is removed") + # assert False # investigate + # elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): + # print("ERROR: lock file was acquired by another job") + # assert False # investigate + # + # if not S3.delete(s3_path_lock): + # print(f"ERROR: File [{s3_path_lock}] delete failure") + # print("INFO: lock released") + # return True + + @classmethod + def upload_result_files_to_s3(cls, result): + if result.results: + for result_ in result.results: + cls.upload_result_files_to_s3(result_) + for file in result.files: + if not Path(file).is_file(): + print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") + result.info += f"\nWARNING: Result file [{file}] was not found" + file_link = S3._upload_file_to_s3(file, upload_to_s3=False) + else: + is_text = False + for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: + if file.endswith(text_file_suffix): + print( + f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" + ) + is_text = True + break + file_link = S3._upload_file_to_s3( + file, + upload_to_s3=True, + text=is_text, + s3_subprefix=Utils.normalize_string(result.name), + ) + result.links.append(file_link) + if result.files: + print( + f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" + ) + result.files = [] + result.dump() + + @classmethod + def update_workflow_results(cls, workflow_name, new_info="", new_sub_results=None): + assert new_info or new_sub_results + + attempt = 1 + prev_status = "" + new_status = "" + done = False + while attempt < 10: + version = cls.copy_result_from_s3_with_version( + Result.file_name_static(workflow_name) + ) + workflow_result = Result.from_fs(workflow_name) + prev_status = workflow_result.status + if new_info: + workflow_result.set_info(new_info) + if new_sub_results: + if isinstance(new_sub_results, Result): + new_sub_results = [new_sub_results] + for result_ in new_sub_results: + workflow_result.update_sub_result(result_) + new_status = workflow_result.status + if cls.copy_result_to_s3_with_version(workflow_result, version=version + 1): + done = True + break + print(f"Attempt [{attempt}] to upload workflow result failed") + attempt += 1 + assert done + + if prev_status != new_status: + return new_status + else: + return None diff --git a/ci/praktika/runner.py b/ci/praktika/runner.py index 1ac8748d1c0..38112dd5684 100644 --- a/ci/praktika/runner.py +++ b/ci/praktika/runner.py @@ -52,6 +52,7 @@ class Runner: cache_success=[], cache_success_base64=[], cache_artifacts={}, + cache_jobs={}, ) for docker in workflow.dockers: workflow_config.digest_dockers[docker.name] = Digest().calc_docker_digest( @@ -123,7 +124,7 @@ class Runner: return 0 - def _run(self, workflow, job, docker="", no_docker=False, param=None): + def _run(self, workflow, job, docker="", no_docker=False, param=None, test=""): # re-set envs for local run env = _Environment.get() env.JOB_NAME = job.name @@ -162,6 +163,9 @@ class Runner: if param: print(f"Custom --param [{param}] will be passed to job's script") cmd += f" --param {param}" + if test: + print(f"Custom --test [{test}] will be passed to job's script") + cmd += f" --test {test}" print(f"--- Run command [{cmd}]") 
with TeePopen(cmd, timeout=job.timeout) as process: @@ -240,10 +244,6 @@ class Runner: result.set_files(files=[Settings.RUN_LOG]) result.update_duration().dump() - if result.info and result.status != Result.Status.SUCCESS: - # provide job info to workflow level - info_errors.append(result.info) - if run_exit_code == 0: providing_artifacts = [] if job.provides and workflow.artifacts: @@ -310,6 +310,7 @@ class Runner: local_run=False, no_docker=False, param=None, + test="", pr=None, sha=None, branch=None, @@ -358,7 +359,12 @@ class Runner: print(f"=== Run script [{job.name}], workflow [{workflow.name}] ===") try: run_code = self._run( - workflow, job, docker=docker, no_docker=no_docker, param=param + workflow, + job, + docker=docker, + no_docker=no_docker, + param=param, + test=test, ) res = run_code == 0 if not res: diff --git a/ci/praktika/runtime.py b/ci/praktika/runtime.py index a87b67c2c79..07c24e0498c 100644 --- a/ci/praktika/runtime.py +++ b/ci/praktika/runtime.py @@ -15,17 +15,23 @@ class RunConfig(MetaClasses.Serializable): # there are might be issue with special characters in job names if used directly in yaml syntax - create base64 encoded list to avoid this cache_success_base64: List[str] cache_artifacts: Dict[str, Cache.CacheRecord] + cache_jobs: Dict[str, Cache.CacheRecord] sha: str @classmethod def from_dict(cls, obj): cache_artifacts = obj["cache_artifacts"] + cache_jobs = obj["cache_jobs"] cache_artifacts_deserialized = {} + cache_jobs_deserialized = {} for artifact_name, cache_artifact in cache_artifacts.items(): cache_artifacts_deserialized[artifact_name] = Cache.CacheRecord.from_dict( cache_artifact ) obj["cache_artifacts"] = cache_artifacts_deserialized + for job_name, cache_jobs in cache_jobs.items(): + cache_jobs_deserialized[job_name] = Cache.CacheRecord.from_dict(cache_jobs) + obj["cache_jobs"] = cache_artifacts_deserialized return RunConfig(**obj) @classmethod diff --git a/ci/praktika/s3.py b/ci/praktika/s3.py index 04a08622dcd..82034b57b80 100644 --- a/ci/praktika/s3.py +++ b/ci/praktika/s3.py @@ -1,12 +1,11 @@ import dataclasses import json -import time from pathlib import Path from typing import Dict from praktika._environment import _Environment from praktika.settings import Settings -from praktika.utils import Shell, Utils +from praktika.utils import Shell class S3: @@ -59,16 +58,15 @@ class S3: return f"https://{s3_full_path}".replace(bucket, endpoint) @classmethod - def put(cls, s3_path, local_path, text=False, metadata=None): + def put(cls, s3_path, local_path, text=False, metadata=None, if_none_matched=False): assert Path(local_path).exists(), f"Path [{local_path}] does not exist" assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" assert Path( local_path ).is_file(), f"Path [{local_path}] is not file. 
Only files are supported" - file_name = Path(local_path).name s3_full_path = s3_path - if not s3_full_path.endswith(file_name): - s3_full_path = f"{s3_path}/{Path(local_path).name}" + if s3_full_path.endswith("/"): + s3_full_path = f"{s3_path}{Path(local_path).name}" s3_full_path = str(s3_full_path).removeprefix("s3://") bucket, key = s3_full_path.split("/", maxsplit=1) @@ -76,6 +74,8 @@ class S3: command = ( f"aws s3api put-object --bucket {bucket} --key {key} --body {local_path}" ) + if if_none_matched: + command += f' --if-none-match "*"' if metadata: for k, v in metadata.items(): command += f" --metadata {k}={v}" @@ -84,7 +84,7 @@ class S3: if text: cmd += " --content-type text/plain" res = cls.run_command_with_retries(command) - assert res + return res @classmethod def run_command_with_retries(cls, command, retries=Settings.MAX_RETRIES_S3): @@ -101,6 +101,14 @@ class S3: elif "does not exist" in stderr: print("ERROR: requested file does not exist") break + elif "Unknown options" in stderr: + print("ERROR: Invalid AWS CLI command or CLI client version:") + print(f" | awc error: {stderr}") + break + elif "PreconditionFailed" in stderr: + print("ERROR: AWS API Call Precondition Failed") + print(f" | awc error: {stderr}") + break if ret_code != 0: print( f"ERROR: aws s3 cp failed, stdout/stderr err: [{stderr}], out [{stdout}]" @@ -108,13 +116,6 @@ class S3: res = ret_code == 0 return res - @classmethod - def get_link(cls, s3_path, local_path): - s3_full_path = f"{s3_path}/{Path(local_path).name}" - bucket = s3_path.split("/")[0] - endpoint = Settings.S3_BUCKET_TO_HTTP_ENDPOINT[bucket] - return f"https://{s3_full_path}".replace(bucket, endpoint) - @classmethod def copy_file_from_s3(cls, s3_path, local_path): assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" @@ -128,6 +129,19 @@ class S3: res = cls.run_command_with_retries(cmd) return res + @classmethod + def copy_file_from_s3_matching_pattern( + cls, s3_path, local_path, include, exclude="*" + ): + assert Path(s3_path), f"Invalid S3 Path [{s3_path}]" + assert Path( + local_path + ).is_dir(), f"Path [{local_path}] does not exist or not a directory" + assert s3_path.endswith("/"), f"s3 path is invalid [{s3_path}]" + cmd = f'aws s3 cp s3://{s3_path} {local_path} --exclude "{exclude}" --include "{include}" --recursive' + res = cls.run_command_with_retries(cmd) + return res + @classmethod def head_object(cls, s3_path): s3_path = str(s3_path).removeprefix("s3://") @@ -148,103 +162,6 @@ class S3: verbose=True, ) - # TODO: apparently should be placed into separate file to be used only inside praktika - # keeping this module clean from importing Settings, Environment and etc, making it easy for use externally - @classmethod - def copy_result_to_s3(cls, result, unlock=True): - result.dump() - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - s3_path_full = f"{s3_path}/{Path(result.file_name()).name}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if env.PR_NUMBER: - print("Duplicate Result for latest commit alias in PR") - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True)}" - url = S3.copy_file_to_s3(s3_path=s3_path, local_path=result.file_name()) - if unlock: - if not cls.unlock(s3_path_full): - print(f"ERROR: File [{s3_path_full}] unlock failure") - assert False # TODO: investigate - return url - - @classmethod - def copy_result_from_s3(cls, local_path, lock=True): - env = _Environment.get() - file_name = Path(local_path).name - s3_path = 
f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}/{file_name}" - if lock: - cls.lock(s3_path) - if not S3.copy_file_from_s3(s3_path=s3_path, local_path=local_path): - print(f"ERROR: failed to cp file [{s3_path}] from s3") - raise - - @classmethod - def lock(cls, s3_path, level=0): - assert level < 3, "Never" - env = _Environment.get() - s3_path_lock = s3_path + f".lock" - file_path_lock = f"{Settings.TEMP_DIR}/{Path(s3_path_lock).name}" - assert Shell.check( - f"echo '''{env.JOB_NAME}''' > {file_path_lock}", verbose=True - ), "Never" - - i = 20 - meta = S3.head_object(s3_path_lock) - while meta: - print(f"WARNING: Failed to acquire lock, meta [{meta}] - wait") - i -= 5 - if i < 0: - info = f"ERROR: lock acquire failure - unlock forcefully" - print(info) - env.add_info(info) - break - time.sleep(5) - - metadata = {"job": Utils.to_base64(env.JOB_NAME)} - S3.put( - s3_path=s3_path_lock, - local_path=file_path_lock, - metadata=metadata, - ) - time.sleep(1) - obj = S3.head_object(s3_path_lock) - if not obj or not obj.has_tags(tags=metadata): - print(f"WARNING: locked by another job [{obj}]") - env.add_info("S3 lock file failure") - cls.lock(s3_path, level=level + 1) - print("INFO: lock acquired") - - @classmethod - def unlock(cls, s3_path): - s3_path_lock = s3_path + ".lock" - env = _Environment.get() - obj = S3.head_object(s3_path_lock) - if not obj: - print("ERROR: lock file is removed") - assert False # investigate - elif not obj.has_tags({"job": Utils.to_base64(env.JOB_NAME)}): - print("ERROR: lock file was acquired by another job") - assert False # investigate - - if not S3.delete(s3_path_lock): - print(f"ERROR: File [{s3_path_lock}] delete failure") - print("INFO: lock released") - return True - - @classmethod - def get_result_link(cls, result): - env = _Environment.get() - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix(latest=True if env.PR_NUMBER else False)}" - return S3.get_link(s3_path=s3_path, local_path=result.file_name()) - - @classmethod - def clean_latest_result(cls): - env = _Environment.get() - env.SHA = "latest" - assert env.PR_NUMBER - s3_path = f"{Settings.HTML_S3_PATH}/{env.get_s3_prefix()}" - S3.clean_s3_directory(s3_path=s3_path) - @classmethod def _upload_file_to_s3( cls, local_file_path, upload_to_s3: bool, text: bool = False, s3_subprefix="" @@ -260,36 +177,3 @@ class S3: ) return html_link return f"file://{Path(local_file_path).absolute()}" - - @classmethod - def upload_result_files_to_s3(cls, result): - if result.results: - for result_ in result.results: - cls.upload_result_files_to_s3(result_) - for file in result.files: - if not Path(file).is_file(): - print(f"ERROR: Invalid file [{file}] in [{result.name}] - skip upload") - result.info += f"\nWARNING: Result file [{file}] was not found" - file_link = cls._upload_file_to_s3(file, upload_to_s3=False) - else: - is_text = False - for text_file_suffix in Settings.TEXT_CONTENT_EXTENSIONS: - if file.endswith(text_file_suffix): - print( - f"File [{file}] matches Settings.TEXT_CONTENT_EXTENSIONS [{Settings.TEXT_CONTENT_EXTENSIONS}] - add text attribute for s3 object" - ) - is_text = True - break - file_link = cls._upload_file_to_s3( - file, - upload_to_s3=True, - text=is_text, - s3_subprefix=Utils.normalize_string(result.name), - ) - result.links.append(file_link) - if result.files: - print( - f"Result files [{result.files}] uploaded to s3 [{result.links[-len(result.files):]}] - clean files list" - ) - result.files = [] - result.dump() diff --git a/ci/praktika/settings.py b/ci/praktika/settings.py index 
1a4068d9398..b281a95370c 100644 --- a/ci/praktika/settings.py +++ b/ci/praktika/settings.py @@ -1,8 +1,152 @@ -from praktika._settings import _Settings -from praktika.mangle import _get_user_settings +import dataclasses +import importlib.util +from pathlib import Path +from typing import Dict, Iterable, List, Optional -Settings = _Settings() -user_settings = _get_user_settings() -for setting, value in user_settings.items(): - Settings.__setattr__(setting, value) +@dataclasses.dataclass +class _Settings: + ###################################### + # Pipeline generation settings # + ###################################### + MAIN_BRANCH = "main" + CI_PATH = "./ci" + WORKFLOW_PATH_PREFIX: str = "./.github/workflows" + WORKFLOWS_DIRECTORY: str = f"{CI_PATH}/workflows" + SETTINGS_DIRECTORY: str = f"{CI_PATH}/settings" + CI_CONFIG_JOB_NAME = "Config Workflow" + DOCKER_BUILD_JOB_NAME = "Docker Builds" + FINISH_WORKFLOW_JOB_NAME = "Finish Workflow" + READY_FOR_MERGE_STATUS_NAME = "Ready for Merge" + CI_CONFIG_RUNS_ON: Optional[List[str]] = None + DOCKER_BUILD_RUNS_ON: Optional[List[str]] = None + VALIDATE_FILE_PATHS: bool = True + + ###################################### + # Runtime Settings # + ###################################### + MAX_RETRIES_S3 = 3 + MAX_RETRIES_GH = 3 + + ###################################### + # S3 (artifact storage) settings # + ###################################### + S3_ARTIFACT_PATH: str = "" + + ###################################### + # CI workspace settings # + ###################################### + TEMP_DIR: str = "/tmp/praktika" + OUTPUT_DIR: str = f"{TEMP_DIR}/output" + INPUT_DIR: str = f"{TEMP_DIR}/input" + PYTHON_INTERPRETER: str = "python3" + PYTHON_PACKET_MANAGER: str = "pip3" + PYTHON_VERSION: str = "3.9" + INSTALL_PYTHON_FOR_NATIVE_JOBS: bool = False + INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS: str = "./ci/requirements.txt" + ENVIRONMENT_VAR_FILE: str = f"{TEMP_DIR}/environment.json" + RUN_LOG: str = f"{TEMP_DIR}/praktika_run.log" + + SECRET_GH_APP_ID: str = "GH_APP_ID" + SECRET_GH_APP_PEM_KEY: str = "GH_APP_PEM_KEY" + + ENV_SETUP_SCRIPT: str = "/tmp/praktika_setup_env.sh" + WORKFLOW_STATUS_FILE: str = f"{TEMP_DIR}/workflow_status.json" + + ###################################### + # CI Cache settings # + ###################################### + CACHE_VERSION: int = 1 + CACHE_DIGEST_LEN: int = 20 + CACHE_S3_PATH: str = "" + CACHE_LOCAL_PATH: str = f"{TEMP_DIR}/ci_cache" + + ###################################### + # Report settings # + ###################################### + HTML_S3_PATH: str = "" + HTML_PAGE_FILE: str = "./praktika/json.html" + TEXT_CONTENT_EXTENSIONS: Iterable[str] = frozenset([".txt", ".log"]) + S3_BUCKET_TO_HTTP_ENDPOINT: Optional[Dict[str, str]] = None + + DOCKERHUB_USERNAME: str = "" + DOCKERHUB_SECRET: str = "" + DOCKER_WD: str = "/wd" + + ###################################### + # CI DB Settings # + ###################################### + SECRET_CI_DB_URL: str = "CI_DB_URL" + SECRET_CI_DB_PASSWORD: str = "CI_DB_PASSWORD" + CI_DB_DB_NAME = "" + CI_DB_TABLE_NAME = "" + CI_DB_INSERT_TIMEOUT_SEC = 5 + + DISABLE_MERGE_COMMIT = True + + +_USER_DEFINED_SETTINGS = [ + "S3_ARTIFACT_PATH", + "CACHE_S3_PATH", + "HTML_S3_PATH", + "S3_BUCKET_TO_HTTP_ENDPOINT", + "TEXT_CONTENT_EXTENSIONS", + "TEMP_DIR", + "OUTPUT_DIR", + "INPUT_DIR", + "CI_CONFIG_RUNS_ON", + "DOCKER_BUILD_RUNS_ON", + "CI_CONFIG_JOB_NAME", + "PYTHON_INTERPRETER", + "PYTHON_VERSION", + "PYTHON_PACKET_MANAGER", + "INSTALL_PYTHON_FOR_NATIVE_JOBS", + "INSTALL_PYTHON_REQS_FOR_NATIVE_JOBS", 
+ "MAX_RETRIES_S3", + "MAX_RETRIES_GH", + "VALIDATE_FILE_PATHS", + "DOCKERHUB_USERNAME", + "DOCKERHUB_SECRET", + "READY_FOR_MERGE_STATUS_NAME", + "SECRET_CI_DB_URL", + "SECRET_CI_DB_PASSWORD", + "CI_DB_DB_NAME", + "CI_DB_TABLE_NAME", + "CI_DB_INSERT_TIMEOUT_SEC", + "SECRET_GH_APP_PEM_KEY", + "SECRET_GH_APP_ID", + "MAIN_BRANCH", + "DISABLE_MERGE_COMMIT", +] + + +def _get_settings() -> _Settings: + res = _Settings() + + directory = Path(_Settings.SETTINGS_DIRECTORY) + for py_file in directory.glob("*.py"): + module_name = py_file.name.removeprefix(".py") + spec = importlib.util.spec_from_file_location( + module_name, f"{_Settings.SETTINGS_DIRECTORY}/{module_name}" + ) + assert spec + foo = importlib.util.module_from_spec(spec) + assert spec.loader + spec.loader.exec_module(foo) + for setting in _USER_DEFINED_SETTINGS: + try: + value = getattr(foo, setting) + res.__setattr__(setting, value) + # print(f"- read user defined setting [{setting} = {value}]") + except Exception as e: + # print(f"Exception while read user settings: {e}") + pass + + return res + + +class GHRunners: + ubuntu = "ubuntu-latest" + + +Settings = _get_settings() diff --git a/ci/praktika/utils.py b/ci/praktika/utils.py index 62eb13b3e19..2bcc94f2559 100644 --- a/ci/praktika/utils.py +++ b/ci/praktika/utils.py @@ -17,8 +17,6 @@ from threading import Thread from types import SimpleNamespace from typing import Any, Dict, Iterator, List, Optional, Type, TypeVar, Union -from praktika._settings import _Settings - T = TypeVar("T", bound="Serializable") diff --git a/ci/praktika/validator.py b/ci/praktika/validator.py index d612881b819..0bb722903e5 100644 --- a/ci/praktika/validator.py +++ b/ci/praktika/validator.py @@ -4,10 +4,8 @@ from itertools import chain from pathlib import Path from praktika import Workflow -from praktika._settings import GHRunners from praktika.mangle import _get_workflows -from praktika.settings import Settings -from praktika.utils import ContextManager +from praktika.settings import GHRunners, Settings class Validator: @@ -168,9 +166,7 @@ class Validator: "\n echo requests==2.32.3 >> ./ci/requirements.txt" ) message += "\n echo https://clickhouse-builds.s3.amazonaws.com/packages/praktika-0.1-py3-none-any.whl >> ./ci/requirements.txt" - cls.evaluate_check( - path.is_file(), message, job.name, workflow.name - ) + cls.evaluate_check(path.is_file(), message, job.name, workflow.name) @classmethod def validate_dockers(cls, workflow: Workflow.Config): diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 0d505ae27c4..707babb1250 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -68,6 +68,7 @@ stateless_tests_jobs = Job.Config( name=JobNames.STATELESS, runs_on=[RunnerLabels.BUILDER], command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", + # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", digest_config=Job.CacheDigestConfig( include_paths=[ From 73fa0f93a51137367a6b883fe31cdd28d466f618 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Mon, 11 Nov 2024 21:55:12 +0100 Subject: [PATCH 49/74] arm build --- .github/workflows/pr.yaml | 166 +++++++++++++++++++++- ci/docker/stateless-test/Dockerfile | 9 +- ci/docker/stateless-test/requirements.txt | 1 + ci/jobs/build_clickhouse.py | 20 +-- ci/jobs/functional_stateless_tests.py | 4 + 
ci/praktika/gh.py | 2 +- ci/praktika/native_jobs.py | 4 +- ci/settings/definitions.py | 4 +- ci/workflows/pull_request.py | 64 +++++---- tests/config/client_config.xml | 1 + 10 files changed, 231 insertions(+), 44 deletions(-) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index 51bb9b52d10..a70ff0cfe23 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -182,7 +182,7 @@ jobs: build_amd_debug: runs-on: [builder] - needs: [config_workflow, docker_builds] + needs: [config_workflow, docker_builds, fast_test] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} name: "Build (amd_debug)" outputs: @@ -222,7 +222,7 @@ jobs: build_amd_release: runs-on: [builder] - needs: [config_workflow, docker_builds] + needs: [config_workflow, docker_builds, fast_test] if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} name: "Build (amd_release)" outputs: @@ -260,6 +260,86 @@ jobs: python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi + build_arm_release: + runs-on: [builder-aarch64] + needs: [config_workflow, docker_builds, fast_test] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} + name: "Build (arm_release)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Build (arm_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Build (arm_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + build_arm_asan: + runs-on: [builder-aarch64] + needs: [config_workflow, docker_builds, fast_test] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} + name: "Build (arm_asan)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Build (arm_asan)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Build (arm_asan)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + stateless_tests_amd_debugparallel: runs-on: [builder] needs: [config_workflow, docker_builds, build_amd_debug] @@ -420,9 +500,89 @@ jobs: python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log fi + stateless_tests_arm_asanparallel: + runs-on: [builder-aarch64] + needs: [config_workflow, docker_builds, build_arm_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbixwYXJhbGxlbCk=') }} + name: "Stateless tests (arm_asan,parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . /tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (arm_asan,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (arm_asan,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + + stateless_tests_arm_asannon_parallel: + runs-on: [func-tester-aarch64] + needs: [config_workflow, docker_builds, build_arm_asan] + if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbixub24tcGFyYWxsZWwp') }} + name: "Stateless tests (arm_asan,non-parallel)" + outputs: + data: ${{ steps.run.outputs.DATA }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + + - name: Prepare env script + run: | + cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' + export PYTHONPATH=./ci:. + + cat > /tmp/praktika/workflow_config_pr.json << 'EOF' + ${{ needs.config_workflow.outputs.data }} + EOF + cat > /tmp/praktika/workflow_status.json << 'EOF' + ${{ toJson(needs) }} + EOF + ENV_SETUP_SCRIPT_EOF + + rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika + mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output + + - name: Run + id: run + run: | + . 
/tmp/praktika_setup_env.sh + set -o pipefail + if command -v ts &> /dev/null; then + python3 -m praktika run --job '''Stateless tests (arm_asan,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log + else + python3 -m praktika run --job '''Stateless tests (arm_asan,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log + fi + finish_workflow: runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel] + needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, build_arm_release, build_arm_asan, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel, stateless_tests_arm_asanparallel, stateless_tests_arm_asannon_parallel] if: ${{ !cancelled() }} name: "Finish Workflow" outputs: diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index 760fceeebbf..cd02fc27f8e 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -7,6 +7,12 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" + +RUN mkdir /etc/clickhouse-server /etc/clickhouse-keeper /etc/clickhouse-server && chmod 777 /etc/clickhouse-* + +RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse +USER clickhouse + # moreutils - provides ts fo FT # expect, bzip2 - requried by FT # bsdmainutils - provides hexdump for FT @@ -106,6 +112,3 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node - -RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse -USER clickhouse \ No newline at end of file diff --git a/ci/docker/stateless-test/requirements.txt b/ci/docker/stateless-test/requirements.txt index d556d23485f..6f64cc08951 100644 --- a/ci/docker/stateless-test/requirements.txt +++ b/ci/docker/stateless-test/requirements.txt @@ -3,3 +3,4 @@ numpy==1.26.4 requests==2.32.3 pandas==1.5.3 scipy==1.12.0 +pyarrow==18.0.0 diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py index 3bdc23d383c..ed9fd491fcf 100644 --- a/ci/jobs/build_clickhouse.py +++ b/ci/jobs/build_clickhouse.py @@ -60,24 +60,24 @@ def main(): CACHE_TYPE = "sccache" + BUILD_TYPE = "RelWithDebInfo" + SANITIZER = "" + AUX_DEFS = " -DENABLE_TESTS=0 " + if "debug" in build_type: print("Build type set: debug") BUILD_TYPE = "Debug" - AUX_DEFS = ( - " -DENABLE_TESTS=1 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " - ) + AUX_DEFS = " -DENABLE_TESTS=1 " elif "release" in build_type: print("Build type set: release") - BUILD_TYPE = "RelWithDebInfo" - AUX_DEFS = " -DENABLE_TESTS=0 " - else: - assert False - - if "asan" in build_type: + AUX_DEFS = ( + " -DENABLE_TESTS=0 -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 " + ) + elif "asan" in build_type: print("Sanitizer set: address") SANITIZER = "address" else: - SANITIZER = "" + assert False cmake_cmd = CMAKE_CMD.format( BUILD_TYPE=BUILD_TYPE, diff --git 
a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py index 390a6336b45..c2d374aa51b 100644 --- a/ci/jobs/functional_stateless_tests.py +++ b/ci/jobs/functional_stateless_tests.py @@ -1,4 +1,5 @@ import argparse +import os import time from pathlib import Path @@ -94,12 +95,15 @@ def main(): if res and JobStages.INSTALL_CLICKHOUSE in stages: commands = [ + f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*", f"chmod +x {ch_path}/clickhouse", f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server", f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client", f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server", f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/", f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage", + # clickhouse benchmark segfaults with --config-path, so provide client config by its default location + f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/", # update_path_ch_config, # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml", # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml", diff --git a/ci/praktika/gh.py b/ci/praktika/gh.py index 77c360a0052..b7e49628ac8 100644 --- a/ci/praktika/gh.py +++ b/ci/praktika/gh.py @@ -18,7 +18,7 @@ class GH: ret_code, out, err = Shell.get_res_stdout_stderr(command, verbose=True) res = ret_code == 0 if not res and "Validation Failed" in err: - print("ERROR: GH command validation error") + print(f"ERROR: GH command validation error.") break if not res and "Bad credentials" in err: print("ERROR: GH credentials/auth failure") diff --git a/ci/praktika/native_jobs.py b/ci/praktika/native_jobs.py index 52bf6c6e204..cff6c851d0e 100644 --- a/ci/praktika/native_jobs.py +++ b/ci/praktika/native_jobs.py @@ -344,7 +344,9 @@ def _finish_workflow(workflow, job_name): failed_results.append(result.name) if failed_results: - ready_for_merge_description = f"Failed: {', '.join(failed_results)}" + ready_for_merge_description = ( + f'Failed {len(failed_results)} "Required for Merge" jobs' + ) if not GH.post_commit_status( name=Settings.READY_FOR_MERGE_STATUS_NAME + f" [{workflow.name}]", diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 99fec8b5402..9f529798830 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -7,8 +7,10 @@ S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com" class RunnerLabels: CI_SERVICES = "ci_services" CI_SERVICES_EBS = "ci_services_ebs" - BUILDER = "builder" + BUILDER_AMD = "builder" + BUILDER_ARM = "builder-aarch64" FUNC_TESTER_AMD = "func-tester" + FUNC_TESTER_ARM = "func-tester-aarch64" BASE_BRANCH = "master" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 707babb1250..94dcc2ab722 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -15,6 +15,8 @@ from ci.settings.definitions import ( class ArtifactNames: CH_AMD_DEBUG = "CH_AMD_DEBUG" CH_AMD_RELEASE = "CH_AMD_RELEASE" + CH_ARM_RELEASE = "CH_ARM_RELEASE" + CH_ARM_ASAN = "CH_ARM_ASAN" style_check_job = Job.Config( @@ -26,7 +28,7 @@ style_check_job = Job.Config( fast_test_job = Job.Config( name=JobNames.FAST_TEST, - runs_on=[RunnerLabels.BUILDER], + 
runs_on=[RunnerLabels.BUILDER_AMD], command="python3 ./ci/jobs/fast_test.py", run_in_docker="clickhouse/fasttest", digest_config=Job.CacheDigestConfig( @@ -38,9 +40,10 @@ fast_test_job = Job.Config( ), ) -amd_build_jobs = Job.Config( +build_jobs = Job.Config( name=JobNames.BUILD, - runs_on=[RunnerLabels.BUILDER], + runs_on=["...from params..."], + requires=[JobNames.FAST_TEST], command="python3 ./ci/jobs/build_clickhouse.py --build-type {PARAMETER}", run_in_docker="clickhouse/fasttest", timeout=3600 * 2, @@ -60,13 +63,24 @@ amd_build_jobs = Job.Config( ], ), ).parametrize( - parameter=["amd_debug", "amd_release"], - provides=[[ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE]], + parameter=["amd_debug", "amd_release", "arm_release", "arm_asan"], + provides=[ + [ArtifactNames.CH_AMD_DEBUG], + [ArtifactNames.CH_AMD_RELEASE], + [ArtifactNames.CH_ARM_RELEASE], + [ArtifactNames.CH_ARM_ASAN], + ], + runs_on=[ + [RunnerLabels.BUILDER_AMD], + [RunnerLabels.BUILDER_AMD], + [RunnerLabels.BUILDER_ARM], + [RunnerLabels.BUILDER_ARM], + ], ) stateless_tests_jobs = Job.Config( name=JobNames.STATELESS, - runs_on=[RunnerLabels.BUILDER], + runs_on=[RunnerLabels.BUILDER_AMD], command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", @@ -81,37 +95,27 @@ stateless_tests_jobs = Job.Config( "amd_debug,non-parallel", "amd_release,parallel", "amd_release,non-parallel", + "arm_asan,parallel", + "arm_asan,non-parallel", ], runs_on=[ - [RunnerLabels.BUILDER], + [RunnerLabels.BUILDER_AMD], [RunnerLabels.FUNC_TESTER_AMD], - [RunnerLabels.BUILDER], + [RunnerLabels.BUILDER_AMD], [RunnerLabels.FUNC_TESTER_AMD], + [RunnerLabels.BUILDER_ARM], + [RunnerLabels.FUNC_TESTER_ARM], ], requires=[ [ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_DEBUG], [ArtifactNames.CH_AMD_RELEASE], [ArtifactNames.CH_AMD_RELEASE], + [ArtifactNames.CH_ARM_ASAN], + [ArtifactNames.CH_ARM_ASAN], ], ) -# stateless_tests_amd_release_jobs = Job.Config( -# name=JobNames.STATELESS_AMD_RELEASE, -# runs_on=[RunnerLabels.BUILDER], -# command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", -# run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", -# digest_config=Job.CacheDigestConfig( -# include_paths=[ -# "./ci/jobs/functional_stateless_tests.py", -# ], -# ), -# requires=[ArtifactNames.CH_AMD_RELEASE], -# ).parametrize( -# parameter=["parallel", "non-parallel"], -# runs_on=[[RunnerLabels.BUILDER], [RunnerLabels.FUNC_TESTER_AMD]], -# ) - workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -119,7 +123,7 @@ workflow = Workflow.Config( jobs=[ style_check_job, fast_test_job, - *amd_build_jobs, + *build_jobs, *stateless_tests_jobs, ], artifacts=[ @@ -133,6 +137,16 @@ workflow = Workflow.Config( type=Artifact.Type.S3, path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", ), + Artifact.Config( + name=ArtifactNames.CH_ARM_RELEASE, + type=Artifact.Type.S3, + path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", + ), + Artifact.Config( + name=ArtifactNames.CH_ARM_ASAN, + type=Artifact.Type.S3, + path=f"{Settings.TEMP_DIR}/build/programs/clickhouse", + ), ], dockers=DOCKERS, secrets=SECRETS, diff --git a/tests/config/client_config.xml 
b/tests/config/client_config.xml index b91952b8773..0eec3b09d24 100644 --- a/tests/config/client_config.xml +++ b/tests/config/client_config.xml @@ -6,6 +6,7 @@ true sslv2,sslv3 true + none AcceptCertificateHandler From 8b340c864a8015cd6e04833b84223b5c61631e8e Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 13 Nov 2024 13:10:23 +0000 Subject: [PATCH 50/74] better --- src/Interpreters/Aggregator.cpp | 13 ++++++++----- src/Interpreters/Aggregator.h | 4 ++-- src/Interpreters/tests/gtest_filecache.cpp | 2 +- src/Processors/Transforms/AggregatingTransform.cpp | 6 +++--- src/Processors/Transforms/AggregatingTransform.h | 2 ++ .../0_stateless/02402_external_disk_mertrics.sql | 3 ++- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/Aggregator.cpp b/src/Interpreters/Aggregator.cpp index bb9e22e5a1b..92aa831f233 100644 --- a/src/Interpreters/Aggregator.cpp +++ b/src/Interpreters/Aggregator.cpp @@ -1519,9 +1519,11 @@ void Aggregator::writeToTemporaryFile(AggregatedDataVariants & data_variants, si Stopwatch watch; size_t rows = data_variants.size(); - std::unique_lock lk(tmp_files_mutex); - auto & out_stream = tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); - lk.unlock(); + auto & out_stream = [this, max_temp_file_size]() -> TemporaryBlockStreamHolder & + { + std::lock_guard lk(tmp_files_mutex); + return tmp_files.emplace_back(getHeader(false), tmp_data.get(), max_temp_file_size); + }(); ProfileEvents::increment(ProfileEvents::ExternalAggregationWritePart); @@ -1642,9 +1644,10 @@ Block Aggregator::convertOneBucketToBlock(AggregatedDataVariants & variants, Are return block; } -std::list & Aggregator::getTemporaryData() +std::list Aggregator::detachTemporaryData() { - return tmp_files; + std::lock_guard lk(tmp_files_mutex); + return std::move(tmp_files); } bool Aggregator::hasTemporaryData() const diff --git a/src/Interpreters/Aggregator.h b/src/Interpreters/Aggregator.h index 451583946eb..eec64c171a6 100644 --- a/src/Interpreters/Aggregator.h +++ b/src/Interpreters/Aggregator.h @@ -311,7 +311,7 @@ public: bool hasTemporaryData() const; - std::list & getTemporaryData(); + std::list detachTemporaryData(); /// Get data structure of the result. Block getHeader(bool final) const; @@ -357,7 +357,7 @@ private: /// For external aggregation. TemporaryDataOnDiskScopePtr tmp_data; mutable std::mutex tmp_files_mutex; - mutable std::list tmp_files; + mutable std::list tmp_files TSA_GUARDED_BY(tmp_files_mutex); size_t min_bytes_for_prefetch = 0; diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index ae45443d4bd..60436604f70 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -988,7 +988,7 @@ try { TemporaryBlockStreamHolder stream(generateBlock(), tmp_data_scope.get()); ASSERT_TRUE(stream); - /// Do nothitng with stream, just create it and destroy. + /// Do nothing with stream, just create it and destroy. } { diff --git a/src/Processors/Transforms/AggregatingTransform.cpp b/src/Processors/Transforms/AggregatingTransform.cpp index 2c54788b995..21eec6c305a 100644 --- a/src/Processors/Transforms/AggregatingTransform.cpp +++ b/src/Processors/Transforms/AggregatingTransform.cpp @@ -813,10 +813,10 @@ void AggregatingTransform::initGenerate() /// Merge external data from all aggregators used in query. 
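// Illustrative sketch only (hypothetical, simplified types; not part of this patch):
// the Aggregator change above appends the new temporary stream while holding the
// mutex (in the patch the lock scope is narrowed with an immediately invoked lambda)
// and later hands the whole list to the consumer by moving it out under the same mutex.
#include <list>
#include <mutex>
#include <string>

struct TmpStream { std::string name; };

class TmpFiles
{
public:
    TmpStream & add(std::string name)
    {
        // std::list never relocates its elements, so the returned reference
        // stays valid even if other threads append concurrently later on.
        std::lock_guard lock(mutex);
        return files.emplace_back(TmpStream{std::move(name)});
    }

    std::list<TmpStream> detach()
    {
        // Transfers ownership to the caller; the member list is left empty.
        std::lock_guard lock(mutex);
        return std::move(files);
    }

private:
    std::mutex mutex;
    std::list<TmpStream> files;
};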
for (auto & aggregator : *params->aggregator_list_ptr) { - auto & tmp_data = aggregator.getTemporaryData(); - num_streams += tmp_data.size(); + tmp_files = aggregator.detachTemporaryData(); + num_streams += tmp_files.size(); - for (auto & tmp_stream : tmp_data) + for (auto & tmp_stream : tmp_files) { auto stat = tmp_stream.finishWriting(); compressed_size += stat.compressed_size; diff --git a/src/Processors/Transforms/AggregatingTransform.h b/src/Processors/Transforms/AggregatingTransform.h index b9212375c91..a7d18664786 100644 --- a/src/Processors/Transforms/AggregatingTransform.h +++ b/src/Processors/Transforms/AggregatingTransform.h @@ -216,6 +216,8 @@ private: RowsBeforeStepCounterPtr rows_before_aggregation; + std::list tmp_files; + void initGenerate(); }; diff --git a/tests/queries/0_stateless/02402_external_disk_mertrics.sql b/tests/queries/0_stateless/02402_external_disk_mertrics.sql index 7237ea19775..1a3bf9a230a 100644 --- a/tests/queries/0_stateless/02402_external_disk_mertrics.sql +++ b/tests/queries/0_stateless/02402_external_disk_mertrics.sql @@ -77,7 +77,8 @@ SELECT 'ok', 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) ) - FROM system.query_log WHERE current_database = currentDatabase() + FROM system.query_log + WHERE current_database = currentDatabase() AND log_comment = '02402_external_disk_mertrics/join' AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; From 3d086a637f6161dc23e264c102e92665a9f0171c Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 13 Nov 2024 13:43:30 +0000 Subject: [PATCH 51/74] fix reserve in TemporaryDataBuffer --- src/Interpreters/TemporaryDataOnDisk.cpp | 20 ++++++++++---------- src/Interpreters/TemporaryDataOnDisk.h | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 60bfd379a72..07f45203f59 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -58,12 +58,12 @@ TemporaryFileHolder::TemporaryFileHolder() class TemporaryFileInLocalCache : public TemporaryFileHolder { public: - explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t max_file_size = 0) + explicit TemporaryFileInLocalCache(FileCache & file_cache, size_t reserve_size = 0) { const auto key = FileSegment::Key::random(); LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( - key, 0, std::max(10_MiB, max_file_size), + key, 0, std::max(1, reserve_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); chassert(segment_holder->size() == 1); @@ -92,13 +92,13 @@ private: class TemporaryFileOnLocalDisk : public TemporaryFileHolder { public: - explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t max_file_size = 0) + explicit TemporaryFileOnLocalDisk(VolumePtr volume, size_t reserve_size = 0) : path_to_file("tmp" + toString(UUIDHelpers::generateV4())) { LOG_TRACE(getLogger("TemporaryFileOnLocalDisk"), "Creating temporary file '{}'", path_to_file); - if (max_file_size > 0) + if (reserve_size > 0) { - auto reservation = volume->reserve(max_file_size); + auto reservation = volume->reserve(reserve_size); if (!reservation) { auto disks = volume->getDisks(); @@ -116,7 +116,7 @@ public: throw Exception(ErrorCodes::NOT_ENOUGH_SPACE, "Not enough space on temporary disk, cannot reserve {} bytes on [{}]", - max_file_size, fmt::join(disks_info, ", ")); + reserve_size, fmt::join(disks_info, 
", ")); } disk = reservation->getDisk(); } @@ -216,10 +216,10 @@ bool TemporaryDataReadBuffer::nextImpl() return true; } -TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size) +TemporaryDataBuffer::TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t reserve_size) : WriteBuffer(nullptr, 0) , parent(parent_) - , file_holder(parent->file_provider(max_file_size == 0 ? parent->getSettings().max_size_on_disk : max_file_size)) + , file_holder(parent->file_provider(reserve_size)) , out_compressed_buf(file_holder->write(), getCodec(parent->getSettings())) { WriteBuffer::set(out_compressed_buf->buffer().begin(), out_compressed_buf->buffer().size()); @@ -339,8 +339,8 @@ void TemporaryDataOnDiskScope::deltaAllocAndCheck(ssize_t compressed_delta, ssiz stat.uncompressed_size += uncompressed_delta; } -TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size) - : WrapperGuard(std::make_unique(parent_, max_file_size), DBMS_TCP_PROTOCOL_VERSION, header_) +TemporaryBlockStreamHolder::TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t reserve_size) + : WrapperGuard(std::make_unique(parent_, reserve_size), DBMS_TCP_PROTOCOL_VERSION, header_) , header(header_) {} diff --git a/src/Interpreters/TemporaryDataOnDisk.h b/src/Interpreters/TemporaryDataOnDisk.h index eab3571dd07..7f023bae65b 100644 --- a/src/Interpreters/TemporaryDataOnDisk.h +++ b/src/Interpreters/TemporaryDataOnDisk.h @@ -184,7 +184,7 @@ public: size_t uncompressed_size = 0; }; - explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + explicit TemporaryDataBuffer(TemporaryDataOnDiskScope * parent_, size_t reserve_size = 0); void nextImpl() override; void finalizeImpl() override; void cancelImpl() noexcept override; @@ -214,7 +214,7 @@ using TemporaryBlockStreamReaderHolder = WrapperGuard; class TemporaryBlockStreamHolder : public WrapperGuard { public: - TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t max_file_size = 0); + TemporaryBlockStreamHolder(const Block & header_, TemporaryDataOnDiskScope * parent_, size_t reserve_size = 0); TemporaryBlockStreamReaderHolder getReadStream() const; From b702a0e2a65b0650470394f93a0f639fd653577d Mon Sep 17 00:00:00 2001 From: vdimir Date: Wed, 13 Nov 2024 14:17:49 +0000 Subject: [PATCH 52/74] fix build --- src/Interpreters/TemporaryDataOnDisk.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/TemporaryDataOnDisk.cpp b/src/Interpreters/TemporaryDataOnDisk.cpp index 07f45203f59..ba7c4dc4b34 100644 --- a/src/Interpreters/TemporaryDataOnDisk.cpp +++ b/src/Interpreters/TemporaryDataOnDisk.cpp @@ -63,7 +63,7 @@ public: const auto key = FileSegment::Key::random(); LOG_TRACE(getLogger("TemporaryFileInLocalCache"), "Creating temporary file in cache with key {}", key); segment_holder = file_cache.set( - key, 0, std::max(1, reserve_size), + key, 0, std::max(1, reserve_size), CreateFileSegmentSettings(FileSegmentKind::Ephemeral), FileCache::getCommonUser()); chassert(segment_holder->size() == 1); From d34f11a833104a039dafc15eb597c8078a6f13ea Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 13 Nov 2024 19:32:47 +0000 Subject: [PATCH 53/74] Fix RIGHT join with PR local plan --- .../QueryPlan/ParallelReplicasLocalPlan.cpp | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git 
a/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp b/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp index 050044edd3a..8c3a69243d8 100644 --- a/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp +++ b/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp @@ -3,12 +3,15 @@ #include #include #include +#include #include #include +#include #include #include #include #include +#include #include #include #include @@ -61,6 +64,19 @@ std::pair, bool> createLocalPlanForParallelReplicas( if (reading) break; + const JoinStep * join = typeid_cast(node->step.get()); + if (join) + { + chassert(node->children.size() == 2); + + const auto kind = join->getJoin()->getTableJoin().kind(); + if (kind == JoinKind::Right) + node = node->children.at(1); + else + node = node->children.at(0); + continue; + } + if (!node->children.empty()) node = node->children.at(0); else From 7d0a6bd1b84bc2c275ea7c696c72706dad33ed05 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 13 Nov 2024 20:48:03 +0000 Subject: [PATCH 54/74] update 02967_parallel_replicas_joins_and_analyzer.reference --- ...llel_replicas_joins_and_analyzer.reference | 50 +++++++++++-------- 1 file changed, 28 insertions(+), 22 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index 1269f792e76..c4394c201be 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -262,17 +262,20 @@ sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -Expression - Join - Expression - Join - Expression - ReadFromMemoryStorage - Expression +Union + Expression + Join + Expression + Join Expression - ReadFromMergeTree - Expression - ReadFromMemoryStorage + ReadFromMemoryStorage + Expression + Expression + ReadFromMergeTree + Expression + ReadFromMemoryStorage + Expression + ReadFromRemoteParallelReplicas -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -305,20 +308,23 @@ sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll. 
select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; Expression Sorting - Expression - Sorting - Expression - Join - Expression - ReadFromMemoryStorage - Expression - Join - Expression + Union + Expression + Sorting + Expression + Join + Expression + ReadFromMemoryStorage + Expression + Join Expression - ReadFromMergeTree - Expression + Expression + ReadFromMergeTree Expression - ReadFromMergeTree + Expression + ReadFromMergeTree + Expression + ReadFromRemoteParallelReplicas -- -- Subqueries for IN allowed with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), From 05ed5f28a557a2f0b1043fbb15dcd808731fb64a Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 13 Nov 2024 21:51:33 +0100 Subject: [PATCH 55/74] small fixes + add test --- src/Common/ThreadStatus.cpp | 8 +- src/Common/ThreadStatus.h | 12 +-- src/Processors/IProcessor.cpp | 15 ++++ src/Processors/QueryPlan/IQueryPlanStep.cpp | 5 ++ src/QueryPipeline/printPipeline.cpp | 4 +- .../03213_distributed_analyzer.reference | 2 +- .../03269_explain_unique_ids.reference | 87 +++++++++++++++++++ .../0_stateless/03269_explain_unique_ids.sh | 59 +++++++++++++ 8 files changed, 179 insertions(+), 13 deletions(-) create mode 100644 tests/queries/0_stateless/03269_explain_unique_ids.reference create mode 100755 tests/queries/0_stateless/03269_explain_unique_ids.sh diff --git a/src/Common/ThreadStatus.cpp b/src/Common/ThreadStatus.cpp index ca5bf029abb..96d48d41e48 100644 --- a/src/Common/ThreadStatus.cpp +++ b/src/Common/ThreadStatus.cpp @@ -204,14 +204,14 @@ bool ThreadStatus::isQueryCanceled() const return false; } -size_t ThreadStatus::incrStepIndex() +size_t ThreadStatus::getNextPlanStepIndex() const { - return ++(*local_data.step_count); + return local_data.plan_step_index->fetch_add(1); } -size_t ThreadStatus::incrProcessorIndex() +size_t ThreadStatus::getNextPipelineProcessorIndex() const { - return ++(*local_data.processor_count); + return local_data.pipeline_processor_index->fetch_add(1); } ThreadStatus::~ThreadStatus() diff --git a/src/Common/ThreadStatus.h b/src/Common/ThreadStatus.h index f5e1b731e14..f2fa035f98e 100644 --- a/src/Common/ThreadStatus.h +++ b/src/Common/ThreadStatus.h @@ -91,10 +91,10 @@ public: String query_for_logs; UInt64 normalized_query_hash = 0; - //QueryPlan can not build parallel, but processor may build parallel in expand() function. - //so we use atomic_size_t for processor_count - std::shared_ptr step_count = std::make_shared(0); - std::shared_ptr processor_count = std::make_shared(0); + // Since processors might be added on the fly within expand() function we use atomic_size_t. + // These two fields are used for EXPLAIN PLAN / PIPELINE. 
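// Illustrative sketch only (hypothetical names, simplified; not part of this patch):
// per-query shared atomic counters of this kind let processors and steps, even when
// created from several threads, draw a unique index via fetch_add that is appended
// to the name, yielding stable IDs such as "ExpressionTransform_7".
#include <atomic>
#include <memory>
#include <string>

struct QueryCounters
{
    std::shared_ptr<std::atomic_size_t> plan_step_index = std::make_shared<std::atomic_size_t>(0);
    std::shared_ptr<std::atomic_size_t> pipeline_processor_index = std::make_shared<std::atomic_size_t>(0);
};

inline std::string makeUniqID(const std::string & name, QueryCounters & counters)
{
    // fetch_add returns the previous value, so indices start at 0 and never repeat.
    return name + "_" + std::to_string(counters.pipeline_processor_index->fetch_add(1));
}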
+ std::shared_ptr plan_step_index = std::make_shared(0); + std::shared_ptr pipeline_processor_index = std::make_shared(0); QueryIsCanceledPredicate query_is_canceled_predicate = {}; }; @@ -319,8 +319,8 @@ public: void initGlobalProfiler(UInt64 global_profiler_real_time_period, UInt64 global_profiler_cpu_time_period); - size_t incrStepIndex(); - size_t incrProcessorIndex(); + size_t getNextPlanStepIndex() const; + size_t getNextPipelineProcessorIndex() const; private: void applyGlobalSettings(); diff --git a/src/Processors/IProcessor.cpp b/src/Processors/IProcessor.cpp index d9bd5074c09..7a985e289d8 100644 --- a/src/Processors/IProcessor.cpp +++ b/src/Processors/IProcessor.cpp @@ -10,6 +10,20 @@ namespace DB { +IProcessor::IProcessor() +{ + processor_index = CurrentThread::isInitialized() ? CurrentThread::get().getNextPipelineProcessorIndex() : 0; +} + +IProcessor::IProcessor(InputPorts inputs_, OutputPorts outputs_) : inputs(std::move(inputs_)), outputs(std::move(outputs_)) +{ + for (auto & port : inputs) + port.processor = this; + for (auto & port : outputs) + port.processor = this; + processor_index = CurrentThread::isInitialized() ? CurrentThread::get().getNextPipelineProcessorIndex() : 0; +} + void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group) { query_plan_step = step; @@ -18,6 +32,7 @@ void IProcessor::setQueryPlanStep(IQueryPlanStep * step, size_t group) { plan_step_name = step->getName(); plan_step_description = step->getStepDescription(); + step_uniq_id = step->getUniqID(); } } diff --git a/src/Processors/QueryPlan/IQueryPlanStep.cpp b/src/Processors/QueryPlan/IQueryPlanStep.cpp index aeb94e8826d..fdb1690bc6d 100644 --- a/src/Processors/QueryPlan/IQueryPlanStep.cpp +++ b/src/Processors/QueryPlan/IQueryPlanStep.cpp @@ -10,6 +10,11 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +IQueryPlanStep::IQueryPlanStep() +{ + step_index = CurrentThread::isInitialized() ? 
CurrentThread::get().getNextPlanStepIndex() : 0; +} + void IQueryPlanStep::updateInputHeaders(Headers input_headers_) { input_headers = std::move(input_headers_); diff --git a/src/QueryPipeline/printPipeline.cpp b/src/QueryPipeline/printPipeline.cpp index 1726d776921..40c88502ed0 100644 --- a/src/QueryPipeline/printPipeline.cpp +++ b/src/QueryPipeline/printPipeline.cpp @@ -113,7 +113,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool if (item.first != nullptr) { out << " subgraph cluster_" << next_step << " {\n"; - out << " label =\"" << item.first->getUniqID() << "\";\n"; + out << " label =\"" << item.first->getName() << "\";\n"; out << " style=filled;\n"; out << " color=lightgrey;\n"; out << " node [style=filled,color=white];\n"; @@ -125,7 +125,7 @@ void printPipelineCompact(const Processors & processors, WriteBuffer & out, bool for (const auto & node : item.second) { const auto & processor = node->agents.front(); - out << " n" << node->id << " [label=\"" << processor->getUniqID(); + out << " n" << node->id << " [label=\"" << processor->getName(); if (node->agents.size() > 1) out << " × " << node->agents.size(); diff --git a/tests/queries/0_stateless/03213_distributed_analyzer.reference b/tests/queries/0_stateless/03213_distributed_analyzer.reference index 2456192ca9d..9d63c0a7a5e 100644 --- a/tests/queries/0_stateless/03213_distributed_analyzer.reference +++ b/tests/queries/0_stateless/03213_distributed_analyzer.reference @@ -1 +1 @@ -['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote_4"];',' }','}'] +['digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}','digraph','{',' rankdir="LR";',' { node [shape = rect]',' n1 [label="Remote"];',' }','}'] diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.reference b/tests/queries/0_stateless/03269_explain_unique_ids.reference new file mode 100644 index 00000000000..a6ffcc7588c --- /dev/null +++ b/tests/queries/0_stateless/03269_explain_unique_ids.reference @@ -0,0 +1,87 @@ +[\n {\n "Plan": {\n "Node Type": "CreatingSets",\n "Node Id": "CreatingSets_22",\n "Description": "Create sets before main query execution",\n "Plans": [\n {\n "Node Type": "Expression",\n "Node Id": "Expression_18",\n "Description": "(Project names + (Before ORDER BY + Projection) [lifted up part])",\n "Plans": [\n {\n "Node Type": "Sorting",\n "Node Id": "Sorting_7",\n "Description": "Sorting for ORDER BY",\n "Plans": [\n {\n "Node Type": "Expression",\n "Node Id": "Expression_16",\n "Description": "(Before ORDER BY + Projection)",\n "Plans": [\n {\n "Node Type": "Aggregating",\n "Node Id": "Aggregating_4",\n "Plans": [\n {\n "Node Type": "Expression",\n "Node Id": "Expression_3",\n "Description": "Before GROUP BY",\n "Plans": [\n {\n "Node Type": "Filter",\n "Node Id": "Filter_14",\n "Description": "(WHERE + Change column names to column identifiers)",\n "Plans": [\n {\n "Node Type": "ReadFromMergeTree",\n "Node Id": "ReadFromMergeTree_0",\n "Description": "default.t"\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }\n ]\n }\n }\n] + + +digraph +{ + rankdir="LR"; + { node [shape = rect] + n0[label="MergeTreeSelect(pool: ReadPool, algorithm: Thread)_5"]; + n1[label="FilterTransform_6"]; + n2[label="ExpressionTransform_7"]; + n3[label="AggregatingTransform_8"]; + n4[label="Resize_9"]; + n5[label="ExpressionTransform_10"]; + n6[label="ExpressionTransform_11"]; + 
n7[label="ExpressionTransform_12"]; + n8[label="ExpressionTransform_13"]; + n9[label="PartialSortingTransform_14"]; + n10[label="PartialSortingTransform_15"]; + n11[label="PartialSortingTransform_16"]; + n12[label="PartialSortingTransform_17"]; + n13[label="LimitsCheckingTransform_18"]; + n14[label="LimitsCheckingTransform_19"]; + n15[label="LimitsCheckingTransform_20"]; + n16[label="LimitsCheckingTransform_21"]; + n17[label="MergeSortingTransform_22"]; + n18[label="MergeSortingTransform_23"]; + n19[label="MergeSortingTransform_24"]; + n20[label="MergeSortingTransform_25"]; + n21[label="MergingSortedTransform_26"]; + n22[label="ExpressionTransform_27"]; + } + n0 -> n1; + n1 -> n2; + n2 -> n3; + n3 -> n4; + n4 -> n5; + n4 -> n6; + n4 -> n7; + n4 -> n8; + n5 -> n9; + n6 -> n10; + n7 -> n11; + n8 -> n12; + n9 -> n13; + n10 -> n14; + n11 -> n15; + n12 -> n16; + n13 -> n17; + n14 -> n18; + n15 -> n19; + n16 -> n20; + n17 -> n21; + n18 -> n21; + n19 -> n21; + n20 -> n21; + n21 -> n22; +} + + +('AggregatingTransform_8','Aggregating_4') +('ConvertingAggregatedToChunksTransform_32','') +('ExpressionTransform_10','Expression_16') +('ExpressionTransform_11','Expression_16') +('ExpressionTransform_12','Expression_16') +('ExpressionTransform_13','Expression_16') +('ExpressionTransform_27','Expression_18') +('ExpressionTransform_7','Expression_3') +('FilterTransform_6','Filter_14') +('LazyOutputFormat_29','') +('LimitsCheckingTransform_18','Sorting_7') +('LimitsCheckingTransform_19','Sorting_7') +('LimitsCheckingTransform_20','Sorting_7') +('LimitsCheckingTransform_21','Sorting_7') +('LimitsCheckingTransform_28','') +('MergeSortingTransform_22','Sorting_7') +('MergeSortingTransform_23','Sorting_7') +('MergeSortingTransform_24','Sorting_7') +('MergeSortingTransform_25','Sorting_7') +('MergeTreeSelect(pool: ReadPool, algorithm: Thread)_5','ReadFromMergeTree_0') +('MergingSortedTransform_26','Sorting_7') +('NullSource_30','') +('NullSource_31','') +('PartialSortingTransform_14','Sorting_7') +('PartialSortingTransform_15','Sorting_7') +('PartialSortingTransform_16','Sorting_7') +('PartialSortingTransform_17','Sorting_7') +('Resize_9','Aggregating_4') diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.sh b/tests/queries/0_stateless/03269_explain_unique_ids.sh new file mode 100755 index 00000000000..2d495345143 --- /dev/null +++ b/tests/queries/0_stateless/03269_explain_unique_ids.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash +# Tags: no-random-settings, no-random-merge-tree-settings + +set -e + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + + +opts=( + --enable_analyzer=1 + --join_algorithm='parallel_hash' + --max_threads=4 +) + +$CLICKHOUSE_CLIENT -q " + CREATE TABLE t + ( + a UInt32 + ) + ENGINE = MergeTree + ORDER BY a; + + INSERT INTO t SELECT number FROM numbers_mt(1e6); +" + +query=" + WITH t0 AS + ( + SELECT * + FROM numbers(1000) + ) + SELECT a * 3 + FROM t + WHERE a IN (t0) + GROUP BY a + ORDER BY a; +" + +$CLICKHOUSE_CLIENT "${opts[@]}" -q "EXPLAIN json=1 $query" + +printf "\n\n" + +$CLICKHOUSE_CLIENT "${opts[@]}" -q "EXPLAIN PIPELINE compact=0,graph=1 $query" + +printf "\n\n" + +query_id="03269_explain_unique_ids_$RANDOM$RANDOM" +$CLICKHOUSE_CLIENT "${opts[@]}" --log_processors_profiles=1 --query_id="$query_id" --format Null -q "$query" + +$CLICKHOUSE_CLIENT -q " + SYSTEM FLUSH LOGS; + + SELECT DISTINCT (processor_uniq_id, step_uniq_id) + FROM system.processors_profile_log + WHERE query_id = '$query_id' + ORDER BY ALL; +" From 436462ce98b32cae881e83f3e485384fe9887a3f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 13 Nov 2024 21:54:45 +0100 Subject: [PATCH 56/74] upd tests --- .../01786_explain_merge_tree.reference | 4 ++-- .../0_stateless/01823_explain_json.reference | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/01786_explain_merge_tree.reference b/tests/queries/0_stateless/01786_explain_merge_tree.reference index 75736669905..36ebbe1a1da 100644 --- a/tests/queries/0_stateless/01786_explain_merge_tree.reference +++ b/tests/queries/0_stateless/01786_explain_merge_tree.reference @@ -29,7 +29,7 @@ Granules: 2/3 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_1", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { @@ -127,7 +127,7 @@ Granules: 3/6 ----------------- "Node Type": "ReadFromMergeTree", - "Node Id": "ReadFromMergeTree_1", + "Node Id": "ReadFromMergeTree_0", "Description": "default.test_index", "Indexes": [ { diff --git a/tests/queries/0_stateless/01823_explain_json.reference b/tests/queries/0_stateless/01823_explain_json.reference index 1aa5aa134e9..6612fd232bf 100644 --- a/tests/queries/0_stateless/01823_explain_json.reference +++ b/tests/queries/0_stateless/01823_explain_json.reference @@ -2,25 +2,25 @@ { "Plan": { "Node Type": "Union", - "Node Id": "Union_11", + "Node Id": "Union_10", "Plans": [ { "Node Type": "Expression", - "Node Id": "Expression_14", + "Node Id": "Expression_13", "Plans": [ { "Node Type": "ReadFromStorage", - "Node Id": "ReadFromStorage_1" + "Node Id": "ReadFromStorage_0" } ] }, { "Node Type": "Expression", - "Node Id": "Expression_17", + "Node Id": "Expression_16", "Plans": [ { "Node Type": "ReadFromStorage", - "Node Id": "ReadFromStorage_5" + "Node Id": "ReadFromStorage_4" } ] } @@ -40,7 +40,7 @@ } -------- "Node Type": "Aggregating", - "Node Id": "Aggregating_4", + "Node Id": "Aggregating_3", "Header": [ { "Name": "__table1.number", @@ -79,16 +79,16 @@ ], -------- "Node Type": "ArrayJoin", - "Node Id": "ArrayJoin_5", + "Node Id": "ArrayJoin_4", "Left": false, "Columns": ["__table1.x", "__table1.y"], -------- "Node Type": "Distinct", - "Node Id": "Distinct_5", + "Node Id": "Distinct_4", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -- "Node Type": "Distinct", - "Node Id": "Distinct_4", + "Node Id": "Distinct_3", "Columns": ["intDiv(__table1.number, 2_UInt8)", "intDiv(__table1.number, 3_UInt8)"], -------- "Sort Description": [ From 
b2dbbea1794a71713c181025ff61119f49803675 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 13 Nov 2024 21:10:59 +0000 Subject: [PATCH 57/74] Code cleanup --- src/Planner/findParallelReplicasQuery.cpp | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 494326c0ed0..75655428d8c 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -164,12 +164,14 @@ QueryTreeNodePtr replaceTablesWithDummyTables(QueryTreeNodePtr query, const Cont return query->cloneAndReplace(visitor.replacement_map); } +#ifdef DUMP_PARALLEL_REPLICAS_QUERY_CANDIDATES static void dumpStack(const std::vector & stack) { std::ranges::reverse_view rv{stack}; for (const auto * node : rv) LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}\n{}", CityHash_v1_0_2::Hash128to64(node->getTreeHash()), node->dumpTree()); } +#endif /// Find the best candidate for parallel replicas execution by verifying query plan. /// If query plan has only Expression, Filter or Join steps, we can execute it fully remotely and check the next query. @@ -179,9 +181,9 @@ const QueryNode * findQueryForParallelReplicas( const std::unordered_map & mapping, const Settings & settings) { - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "{}", StackTrace().toString()); - +#ifdef DUMP_PARALLEL_REPLICAS_QUERY_CANDIDATES dumpStack(stack); +#endif struct Frame { @@ -209,12 +211,6 @@ const QueryNode * findQueryForParallelReplicas( if (it == mapping.end()) break; - LOG_DEBUG( - getLogger(__PRETTY_FUNCTION__), - "{} : {}", - CityHash_v1_0_2::Hash128to64(it->first->getTreeHash()), - it->second->step->getName()); - std::stack nodes_to_check; nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; @@ -227,8 +223,6 @@ const QueryNode * findQueryForParallelReplicas( const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Step {} children={}", step->getName(), children.size()); - if (children.empty()) { /// Found a source step. @@ -345,9 +339,6 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr new_stack.pop_back(); } } - - if (res) - LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "Chosen query: {}", res->dumpTree()); return res; } From 33fb3b70997cc2ed5466d503e948ce5e42b6674e Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Wed, 13 Nov 2024 21:13:32 +0000 Subject: [PATCH 58/74] Test cleanup --- ...llel_replicas_joins_and_analyzer.reference | 96 +++++++------------ ...arallel_replicas_joins_and_analyzer.sql.j2 | 69 +++++++------ 2 files changed, 68 insertions(+), 97 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference index c4394c201be..765847c0607 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.reference @@ -2,7 +2,7 @@ set parallel_replicas_prefer_local_join = 0; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. 
-select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -18,7 +18,7 @@ select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x 13 13 0 0 0 0 14 14 14 14 0 0 15 15 0 0 0 0 -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z; Union Expression Join @@ -40,8 +40,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -63,8 +62,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -90,8 +88,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, 
prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; 54 54 50 50 12 12 0 64 64 0 0 0 0 1 explain description=0 @@ -100,8 +97,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; Expression Sorting Expression @@ -129,8 +125,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -152,8 +147,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Expression @@ -181,8 +175,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -204,8 +197,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Expression @@ -237,8 +229,7 @@ sub2 as (select y, z from tab2 where y != 
4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; 0 0 0 0 0 0 6 6 6 6 0 0 8 8 8 8 0 0 @@ -260,8 +251,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; Union Expression Join @@ -283,7 +273,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 @@ -305,7 +295,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -332,8 +322,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -355,8 +344,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, 
cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -389,7 +377,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -412,7 +400,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0;-- { echoOn } +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0;-- { echoOn } Expression Sorting Expression @@ -442,7 +430,7 @@ Expression ReadFromRemoteParallelReplicas set parallel_replicas_prefer_local_join = 1; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. -select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -458,7 +446,7 @@ select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x 13 13 0 0 0 0 14 14 14 14 0 0 15 15 0 0 0 0 -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z; Union Expression Join @@ -482,8 +470,7 @@ sub2 as 
(select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -505,8 +492,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -534,8 +520,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; 54 54 50 50 12 12 0 64 64 0 0 0 0 1 explain description=0 @@ -544,8 +529,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; Expression Sorting Expression @@ -575,8 +559,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -598,8 +581,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, 
rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Expression @@ -628,8 +610,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -651,8 +632,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Expression @@ -684,8 +664,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; 0 0 0 0 0 0 6 6 6 6 0 0 8 8 8 8 0 0 @@ -707,8 +686,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; Union Expression Join @@ -732,7 +710,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 @@ -754,7 
+732,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -782,8 +760,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -805,8 +782,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; Expression Sorting Union @@ -841,7 +817,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0; 0 0 0 0 0 0 1 1 0 0 0 0 3 3 0 0 0 0 @@ -864,7 +840,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0; Expression Sorting Expression diff --git a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 index 23291881eb4..31cb0b735ae 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 +++ 
b/tests/queries/0_stateless/02967_parallel_replicas_joins_and_analyzer.sql.j2 @@ -1,16 +1,17 @@ -drop table if exists tab1; -drop table if exists tab2; -drop table if exists tab3; +drop table if exists tab1 sync; +drop table if exists tab2 sync; +drop table if exists tab3 sync; -create table tab1 (x UInt32, y UInt32, shard UInt32) engine = MergeTree order by shard; -create table tab2 (y UInt32, z UInt32) engine = MergeTree order by tuple(); -create table tab3 (z UInt32, a UInt32) engine = MergeTree order by tuple(); +create table tab1 (x UInt32, y UInt32, shard UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02967/tab1', 'r1') order by shard; +create table tab2 (y UInt32, z UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02967/tab2', 'r1') order by tuple(); +create table tab3 (z UInt32, a UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02967/tab3', 'r1') order by tuple(); insert into tab1 select number, number, number from numbers(16); insert into tab2 select number * 2, number * 2 from numbers(8); insert into tab3 select number * 4, number * 4 from numbers(4); -set parallel_replicas_local_plan=1; +set enable_analyzer = 1; +set enable_parallel_replicas = 2, max_parallel_replicas = 2, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan = 1; {% for use_global_in in [0, 1] -%} @@ -19,8 +20,9 @@ set parallel_replicas_local_plan=1; set parallel_replicas_prefer_local_join = {{use_global_in}}; -- A query with only INNER/LEFT joins is fully send to replicas. JOIN is executed in GLOBAL mode. -select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; -explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z order by x; +explain description=0 select x, y, r.y, z, rr.z, a from (select l.x, l.y, r.y, r.z as z from (select x, y from tab1 where x != 2) l any left join (select y, z from tab2 where y != 4) r on l.y = r.y) ll any left join (select z, a from tab3 where z != 8) rr on ll.z = rr.z; + -- -- The same query with cte; with sub1 as (select x, y from tab1 where x != 2), @@ -28,8 +30,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left 
join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -37,8 +38,8 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; + -- -- GROUP BY should work up to WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -46,8 +47,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -55,8 +55,8 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select sum(x), sum(y), sum(r.y), sum(z), sum(rr.z), sum(a), key from sub3 ll any left join sub4 rr on ll.z = rr.z group by x % 2 as key) -select * from sub5 order by key -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by key; + -- -- ORDER BY in sub3 : sub1 is fully pushed, sub3 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -64,8 +64,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -73,8 +72,8 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y order by l.x), sub4 as (select z, a from tab3 where z != 8), sub5 as 
(select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; + -- -- ORDER BY in sub1 : sub1 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2 order by y), @@ -82,8 +81,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; explain description=0 with sub1 as (select x, y from tab1 where x != 2 order by y), @@ -91,8 +89,8 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; + -- -- RIGHT JOIN in sub3: sub3 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -100,8 +98,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -109,8 +106,8 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub2 r any right join sub1 l on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, l.y, y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5; + -- -- RIGHT JOIN in sub5: sub5 -> WithMergableStage with sub1 as (select x, y from tab1 where x != 2), @@ -118,7 +115,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from 
sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; explain description=0 with sub1 as (select x, y from tab1 where x != 2), @@ -126,7 +123,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select z, a, x, y, r.y, ll.z from sub4 rr any right join sub3 ll on ll.z = rr.z) -select * from sub5 order by x SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; -- -- Subqueries for IN allowed @@ -135,8 +132,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; explain description=0 with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -144,8 +140,7 @@ sub2 as (select y, z from tab2 where y != 4), sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y = r.y), sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) -select * from sub5 order by x -SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1; +select * from sub5 order by x; -- -- Subqueries for IN are not allowed @@ -155,7 +150,7 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0; explain description=0 with sub1 as (select x, y from tab1 where x in (select number from numbers(16) where number != 2)), @@ -164,6 +159,6 @@ sub3 as (select l.x, l.y, r.y, r.z as z from sub1 l any left join sub2 r on l.y sub4 as (select z, a from tab3 where z != 8), sub5 as (select x, y, r.y, z, rr.z, a from sub3 ll any left join sub4 rr on ll.z = rr.z) select * from sub5 order by x -SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, 
prefer_localhost_replica = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_analyzer=1, parallel_replicas_allow_in_with_subquery=0; +SETTINGS enable_parallel_replicas = 1, parallel_replicas_allow_in_with_subquery = 0; {%- endfor %} From 8c3fe9e2c2c57d2e243b8105858c88a4240e8827 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 13 Nov 2024 22:31:23 +0100 Subject: [PATCH 59/74] upd test --- tests/queries/0_stateless/03269_explain_unique_ids.reference | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.reference b/tests/queries/0_stateless/03269_explain_unique_ids.reference index a6ffcc7588c..d27dbcbaffa 100644 --- a/tests/queries/0_stateless/03269_explain_unique_ids.reference +++ b/tests/queries/0_stateless/03269_explain_unique_ids.reference @@ -59,6 +59,9 @@ digraph ('AggregatingTransform_8','Aggregating_4') ('ConvertingAggregatedToChunksTransform_32','') +('CreatingSetsTransform_2','CreatingSet_19') +('EmptySink_3','') +('ExpressionTransform_1','Expression_21') ('ExpressionTransform_10','Expression_16') ('ExpressionTransform_11','Expression_16') ('ExpressionTransform_12','Expression_16') @@ -80,6 +83,7 @@ digraph ('MergingSortedTransform_26','Sorting_7') ('NullSource_30','') ('NullSource_31','') +('NumbersRange_0','ReadFromSystemNumbers_9') ('PartialSortingTransform_14','Sorting_7') ('PartialSortingTransform_15','Sorting_7') ('PartialSortingTransform_16','Sorting_7') From 94ce517dc1c9187e3cca6944d3652d2c990637ad Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 13 Nov 2024 22:41:30 +0100 Subject: [PATCH 60/74] upd docs --- docs/en/sql-reference/statements/explain.md | 71 ++++++++++----------- 1 file changed, 35 insertions(+), 36 deletions(-) diff --git a/docs/en/sql-reference/statements/explain.md b/docs/en/sql-reference/statements/explain.md index e7c2000301a..62190a5ba51 100644 --- a/docs/en/sql-reference/statements/explain.md +++ b/docs/en/sql-reference/statements/explain.md @@ -161,6 +161,8 @@ Settings: - `actions` — Prints detailed information about step actions. Default: 0. - `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping. +When `json=1` step names will contain an additional suffix with unique step identifier. 
+ Example: ```sql @@ -194,30 +196,25 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw; { "Plan": { "Node Type": "Union", + "Node Id": "Union_10", "Plans": [ { "Node Type": "Expression", + "Node Id": "Expression_13", "Plans": [ { - "Node Type": "SettingQuotaAndLimits", - "Plans": [ - { - "Node Type": "ReadFromStorage" - } - ] + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_0" } ] }, { "Node Type": "Expression", + "Node Id": "Expression_16", "Plans": [ { - "Node Type": "SettingQuotaAndLimits", - "Plans": [ - { - "Node Type": "ReadFromStorage" - } - ] + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_4" } ] } @@ -249,6 +246,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; { "Plan": { "Node Type": "Expression", + "Node Id": "Expression_5", "Header": [ { "Name": "1", @@ -261,23 +259,13 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy; ], "Plans": [ { - "Node Type": "SettingQuotaAndLimits", + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_0", "Header": [ { "Name": "dummy", "Type": "UInt8" } - ], - "Plans": [ - { - "Node Type": "ReadFromStorage", - "Header": [ - { - "Name": "dummy", - "Type": "UInt8" - } - ] - } ] } ] @@ -351,17 +339,31 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; { "Plan": { "Node Type": "Expression", + "Node Id": "Expression_5", "Expression": { - "Inputs": [], + "Inputs": [ + { + "Name": "dummy", + "Type": "UInt8" + } + ], "Actions": [ { - "Node Type": "Column", + "Node Type": "INPUT", "Result Type": "UInt8", - "Result Type": "Column", + "Result Name": "dummy", + "Arguments": [0], + "Removed Arguments": [0], + "Result": 0 + }, + { + "Node Type": "COLUMN", + "Result Type": "UInt8", + "Result Name": "1", "Column": "Const(UInt8)", "Arguments": [], "Removed Arguments": [], - "Result": 0 + "Result": 1 } ], "Outputs": [ @@ -370,17 +372,12 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw; "Type": "UInt8" } ], - "Positions": [0], - "Project Input": true + "Positions": [1] }, "Plans": [ { - "Node Type": "SettingQuotaAndLimits", - "Plans": [ - { - "Node Type": "ReadFromStorage" - } - ] + "Node Type": "ReadFromStorage", + "Node Id": "ReadFromStorage_0" } ] } @@ -396,6 +393,8 @@ Settings: - `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0. - `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1. +When `compact=0` and `graph=1` processor names will contain an additional suffix with unique processor identifier. 
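These suffixed names can also be correlated with `system.processors_profile_log`, which carries the same identifiers in its `processor_uniq_id` and `step_uniq_id` columns (see the 03269_explain_unique_ids test later in this series). A minimal sketch, assuming `log_processors_profiles = 1` was enabled for the traced query and using a placeholder query id:

```sql
-- Render the pipeline with unique processor names
-- (per the note above, the suffix appears with compact = 0 and graph = 1).
EXPLAIN PIPELINE compact = 0, graph = 1
SELECT sum(number) FROM numbers_mt(1000000) GROUP BY number % 4;

-- Flush the profile log before reading it.
SYSTEM FLUSH LOGS;

-- '<query_id>' is a placeholder for the id of the traced query.
SELECT DISTINCT processor_uniq_id, step_uniq_id
FROM system.processors_profile_log
WHERE query_id = '<query_id>'
ORDER BY ALL;
```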
+ Example: ```sql From ac0f013fabecef06a3ff2d34123ac2cad4a21a17 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 13 Nov 2024 23:36:06 +0100 Subject: [PATCH 61/74] upd test --- tests/queries/0_stateless/03269_explain_unique_ids.reference | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.reference b/tests/queries/0_stateless/03269_explain_unique_ids.reference index d27dbcbaffa..b70b7427621 100644 --- a/tests/queries/0_stateless/03269_explain_unique_ids.reference +++ b/tests/queries/0_stateless/03269_explain_unique_ids.reference @@ -5,7 +5,7 @@ digraph { rankdir="LR"; { node [shape = rect] - n0[label="MergeTreeSelect(pool: ReadPool, algorithm: Thread)_5"]; + n0[label="MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder)_5"]; n1[label="FilterTransform_6"]; n2[label="ExpressionTransform_7"]; n3[label="AggregatingTransform_8"]; @@ -79,7 +79,7 @@ digraph ('MergeSortingTransform_23','Sorting_7') ('MergeSortingTransform_24','Sorting_7') ('MergeSortingTransform_25','Sorting_7') -('MergeTreeSelect(pool: ReadPool, algorithm: Thread)_5','ReadFromMergeTree_0') +('MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder)_5','ReadFromMergeTree_0') ('MergingSortedTransform_26','Sorting_7') ('NullSource_30','') ('NullSource_31','') From d6b3f94a35fe9d1d5901fcb47e0b3c2207c2dceb Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 14 Nov 2024 09:30:58 +0000 Subject: [PATCH 62/74] Cleanup --- src/Storages/buildQueryTreeForShard.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Storages/buildQueryTreeForShard.cpp b/src/Storages/buildQueryTreeForShard.cpp index 8d8af134a05..bce30260954 100644 --- a/src/Storages/buildQueryTreeForShard.cpp +++ b/src/Storages/buildQueryTreeForShard.cpp @@ -336,7 +336,7 @@ QueryTreeNodePtr getSubqueryFromTableExpression( { throw Exception( ErrorCodes::LOGICAL_ERROR, - "Expected JOIN right table expression to be table, table function, query or union node. Actual {}", + "Expected JOIN table expression to be table, table function, query or union node. 
Actual {}", join_table_expression->formatASTForErrorMessage()); } @@ -366,8 +366,7 @@ QueryTreeNodePtr buildQueryTreeForShard(const PlannerContextPtr & planner_contex { QueryTreeNodePtr join_table_expression; const auto join_kind = join_node->getKind(); - // const auto join_strictness = join_node->getStrictness(); - if (join_kind == JoinKind::Left || (join_kind == JoinKind::Inner /* && join_strictness == JoinStrictness::All*/)) + if (join_kind == JoinKind::Left || join_kind == JoinKind::Inner) { join_table_expression = join_node->getRightTableExpression(); } From 17327a242d6119a2cb9d404adb977e1165b83b23 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 14 Nov 2024 09:32:28 +0000 Subject: [PATCH 63/74] Cleanup --- tests/clickhouse-test | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a1ffcc2030f..9c035b7cc35 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -821,10 +821,9 @@ class SettingsRandomizer: "optimize_aggregation_in_order": lambda: random.randint(0, 1), "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000), "use_uncompressed_cache": lambda: random.randint(0, 1), - # see https://github.com/ClickHouse/ClickHouse/issues/65690 - # "min_bytes_to_use_direct_io": threshold_generator( - # 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 - # ), + "min_bytes_to_use_direct_io": threshold_generator( + 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 + ), "min_bytes_to_use_mmap_io": threshold_generator( 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 ), From aeda0db12c2d9efd2ca97998962230b8990ecbbb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 14 Nov 2024 13:16:28 +0100 Subject: [PATCH 64/74] fix test --- .../0_stateless/03269_explain_unique_ids.reference | 4 ++-- tests/queries/0_stateless/03269_explain_unique_ids.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.reference b/tests/queries/0_stateless/03269_explain_unique_ids.reference index b70b7427621..e5e693b5bfb 100644 --- a/tests/queries/0_stateless/03269_explain_unique_ids.reference +++ b/tests/queries/0_stateless/03269_explain_unique_ids.reference @@ -5,7 +5,7 @@ digraph { rankdir="LR"; { node [shape = rect] - n0[label="MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder)_5"]; + n0[label="MergeTreeSelect_5"]; n1[label="FilterTransform_6"]; n2[label="ExpressionTransform_7"]; n3[label="AggregatingTransform_8"]; @@ -79,7 +79,7 @@ digraph ('MergeSortingTransform_23','Sorting_7') ('MergeSortingTransform_24','Sorting_7') ('MergeSortingTransform_25','Sorting_7') -('MergeTreeSelect(pool: ReadPoolInOrder, algorithm: InOrder)_5','ReadFromMergeTree_0') +('MergeTreeSelect_5','ReadFromMergeTree_0') ('MergingSortedTransform_26','Sorting_7') ('NullSource_30','') ('NullSource_31','') diff --git a/tests/queries/0_stateless/03269_explain_unique_ids.sh b/tests/queries/0_stateless/03269_explain_unique_ids.sh index 2d495345143..0122b909e8e 100755 --- a/tests/queries/0_stateless/03269_explain_unique_ids.sh +++ b/tests/queries/0_stateless/03269_explain_unique_ids.sh @@ -10,7 +10,6 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) opts=( --enable_analyzer=1 - --join_algorithm='parallel_hash' --max_threads=4 ) @@ -23,6 +22,8 @@ $CLICKHOUSE_CLIENT -q " ORDER BY a; INSERT INTO t SELECT number FROM numbers_mt(1e6); + + OPTIMIZE TABLE t FINAL; " query=" @@ -35,14 +36,14 @@ query=" FROM t WHERE a IN (t0) GROUP BY a - ORDER BY a; + ORDER BY a " $CLICKHOUSE_CLIENT "${opts[@]}" -q 
"EXPLAIN json=1 $query" printf "\n\n" -$CLICKHOUSE_CLIENT "${opts[@]}" -q "EXPLAIN PIPELINE compact=0,graph=1 $query" +$CLICKHOUSE_CLIENT "${opts[@]}" -q "SELECT replaceRegexpAll(explain, '(\w+)\(.*\)', '\\1') FROM (EXPLAIN PIPELINE compact=0,graph=1 $query)" printf "\n\n" @@ -52,7 +53,7 @@ $CLICKHOUSE_CLIENT "${opts[@]}" --log_processors_profiles=1 --query_id="$query_i $CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; - SELECT DISTINCT (processor_uniq_id, step_uniq_id) + SELECT DISTINCT (replaceRegexpAll(processor_uniq_id, '(\w+)\(.*\)', '\\1'), step_uniq_id) FROM system.processors_profile_log WHERE query_id = '$query_id' ORDER BY ALL; From 5fafb44446f40a5ee89c642e8901e7a648a06861 Mon Sep 17 00:00:00 2001 From: Vladimir Cherkasov Date: Thu, 14 Nov 2024 15:05:34 +0100 Subject: [PATCH 65/74] m --- tests/integration/test_storage_mongodb/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_storage_mongodb/test.py b/tests/integration/test_storage_mongodb/test.py index ec098d7ac54..e810b613290 100644 --- a/tests/integration/test_storage_mongodb/test.py +++ b/tests/integration/test_storage_mongodb/test.py @@ -395,7 +395,7 @@ def test_secure_connection_uri(started_cluster): simple_mongo_table.insert_many(data) node = started_cluster.instances["node"] node.query( - "CREATE OR REPLACE TABLE test_secure_connection_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true', 'test_secure_connection_uri')" + "CREATE OR REPLACE TABLE test_secure_connection_uri(key UInt64, data String) ENGINE = MongoDB('mongodb://root:clickhouse@mongo_secure:27017/test?tls=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true&authSource=admin', 'test_secure_connection_uri')" ) assert node.query("SELECT COUNT() FROM test_secure_connection_uri") == "100\n" From 6441c5960781e4d42bf83079093636897efce4ec Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Thu, 14 Nov 2024 15:24:47 +0000 Subject: [PATCH 66/74] Better --- .../QueryPlan/ParallelReplicasLocalPlan.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp b/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp index 8c3a69243d8..6704095ca82 100644 --- a/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp +++ b/src/Processors/QueryPlan/ParallelReplicasLocalPlan.cpp @@ -64,21 +64,15 @@ std::pair, bool> createLocalPlanForParallelReplicas( if (reading) break; - const JoinStep * join = typeid_cast(node->step.get()); - if (join) + if (!node->children.empty()) { - chassert(node->children.size() == 2); - - const auto kind = join->getJoin()->getTableJoin().kind(); - if (kind == JoinKind::Right) + // in case of RIGHT JOIN, - reading from right table is parallelized among replicas + const JoinStep * join = typeid_cast(node->step.get()); + if (join && join->getJoin()->getTableJoin().kind() == JoinKind::Right) node = node->children.at(1); else node = node->children.at(0); - continue; } - - if (!node->children.empty()) - node = node->children.at(0); else node = nullptr; } From 941c0f0c3b5794571361e5fb27780d656a106786 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Wed, 13 Nov 2024 13:24:32 +0100 Subject: [PATCH 67/74] add stateful tests --- .github/workflows/pr.yaml | 621 ------------------ .github/workflows/pull_request.yml | 212 ++++++ ci/docker/stateful-test/Dockerfile | 14 + ci/docker/stateless-test/Dockerfile | 7 +- 
ci/jobs/functional_stateful_tests.py | 170 +++++ ci/jobs/functional_stateless_tests.py | 11 +- ci/jobs/scripts/clickhouse_proc.py | 12 +- .../scripts/functional_tests/setup_minio.sh | 162 +++++ ci/praktika/json.html | 3 +- ci/settings/definitions.py | 13 +- ci/workflows/pull_request.py | 31 +- tests/config/config.d/ssl_certs.xml | 4 +- tests/config/install.sh | 11 +- tests/docker_scripts/setup_minio.sh | 32 +- 14 files changed, 634 insertions(+), 669 deletions(-) delete mode 100644 .github/workflows/pr.yaml create mode 100644 .github/workflows/pull_request.yml create mode 100644 ci/docker/stateful-test/Dockerfile create mode 100644 ci/jobs/functional_stateful_tests.py create mode 100755 ci/jobs/scripts/functional_tests/setup_minio.sh diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml deleted file mode 100644 index a70ff0cfe23..00000000000 --- a/.github/workflows/pr.yaml +++ /dev/null @@ -1,621 +0,0 @@ -# generated by praktika - -name: PR - -on: - pull_request: - branches: ['master'] - -# Cancel the previous wf run in PRs. -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - GH_TOKEN: ${{ github.token }} - -# Allow updating GH commit statuses and PR comments to post an actual job reports link -permissions: write-all - -jobs: - - config_workflow: - runs-on: [ci_services] - needs: [] - name: "Config Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Config Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - docker_builds: - runs-on: [ci_services_ebs] - needs: [config_workflow] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIEJ1aWxkcw==') }} - name: "Docker Builds" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Docker Builds''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - style_check: - runs-on: [ci_services] - needs: [config_workflow, docker_builds] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgQ2hlY2s=') }} - name: "Style Check" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Style Check''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - fast_test: - runs-on: [builder] - needs: [config_workflow, docker_builds] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }} - name: "Fast test" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Fast test''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - build_amd_debug: - runs-on: [builder] - needs: [config_workflow, docker_builds, fast_test] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }} - name: "Build (amd_debug)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. 
- - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Build (amd_debug)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - build_amd_release: - runs-on: [builder] - needs: [config_workflow, docker_builds, fast_test] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }} - name: "Build (amd_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Build (amd_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - build_arm_release: - runs-on: [builder-aarch64] - needs: [config_workflow, docker_builds, fast_test] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }} - name: "Build (arm_release)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build (arm_release)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Build (arm_release)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - build_arm_asan: - runs-on: [builder-aarch64] - needs: [config_workflow, docker_builds, fast_test] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }} - name: "Build (arm_asan)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Build (arm_asan)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Build (arm_asan)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_amd_debugparallel: - runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcscGFyYWxsZWwp') }} - name: "Stateless tests (amd_debug,parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (amd_debug,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_amd_debugnon_parallel: - runs-on: [func-tester] - needs: [config_workflow, docker_builds, build_amd_debug] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsbm9uLXBhcmFsbGVsKQ==') }} - name: "Stateless tests (amd_debug,non-parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (amd_debug,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_amd_releaseparallel: - runs-on: [builder] - needs: [config_workflow, docker_builds, build_amd_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxwYXJhbGxlbCk=') }} - name: "Stateless tests (amd_release,parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (amd_release,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_amd_releasenon_parallel: - runs-on: [func-tester] - needs: [config_workflow, docker_builds, build_amd_release] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfcmVsZWFzZSxub24tcGFyYWxsZWwp') }} - name: "Stateless tests (amd_release,non-parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (amd_release,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_arm_asanparallel: - runs-on: [builder-aarch64] - needs: [config_workflow, docker_builds, build_arm_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbixwYXJhbGxlbCk=') }} - name: "Stateless tests (arm_asan,parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (arm_asan,parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (arm_asan,parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - stateless_tests_arm_asannon_parallel: - runs-on: [func-tester-aarch64] - needs: [config_workflow, docker_builds, build_arm_asan] - if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbixub24tcGFyYWxsZWwp') }} - name: "Stateless tests (arm_asan,non-parallel)" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . /tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Stateless tests (arm_asan,non-parallel)''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Stateless tests (arm_asan,non-parallel)''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi - - finish_workflow: - runs-on: [ci_services] - needs: [config_workflow, docker_builds, style_check, fast_test, build_amd_debug, build_amd_release, build_arm_release, build_arm_asan, stateless_tests_amd_debugparallel, stateless_tests_amd_debugnon_parallel, stateless_tests_amd_releaseparallel, stateless_tests_amd_releasenon_parallel, stateless_tests_arm_asanparallel, stateless_tests_arm_asannon_parallel] - if: ${{ !cancelled() }} - name: "Finish Workflow" - outputs: - data: ${{ steps.run.outputs.DATA }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Prepare env script - run: | - cat > /tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF' - export PYTHONPATH=./ci:. - - cat > /tmp/praktika/workflow_config_pr.json << 'EOF' - ${{ needs.config_workflow.outputs.data }} - EOF - cat > /tmp/praktika/workflow_status.json << 'EOF' - ${{ toJson(needs) }} - EOF - ENV_SETUP_SCRIPT_EOF - - rm -rf /tmp/praktika/input /tmp/praktika/output /tmp/praktika - mkdir -p /tmp/praktika /tmp/praktika/input /tmp/praktika/output - - - name: Run - id: run - run: | - . 
/tmp/praktika_setup_env.sh - set -o pipefail - if command -v ts &> /dev/null; then - python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee /tmp/praktika/praktika_run.log - else - python3 -m praktika run --job '''Finish Workflow''' --workflow "PR" --ci |& tee /tmp/praktika/praktika_run.log - fi diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml new file mode 100644 index 00000000000..e4eb44b2774 --- /dev/null +++ b/.github/workflows/pull_request.yml @@ -0,0 +1,212 @@ +# yamllint disable rule:comments-indentation +name: PullRequestCI + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + +on: # yamllint disable-line rule:truthy + pull_request: + types: + - synchronize + - reopened + - opened + branches: + - master + +# Cancel the previous wf run in PRs. +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + RunConfig: + runs-on: [self-hosted, style-checker-aarch64] + outputs: + data: ${{ steps.runconfig.outputs.CI_DATA }} + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + clear-repository: true # to ensure correct digests + fetch-depth: 0 # to get a version + filter: tree:0 + - name: Debug Info + uses: ./.github/actions/debug + - name: Set pending Sync status + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status + - name: Labels check + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 run_check.py + - name: Python unit tests + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + echo "Testing the main ci directory" + python3 -m unittest discover -s . -p 'test_*.py' + - name: PrepareRunConfig + id: runconfig + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --outfile ${{ runner.temp }}/ci_run_data.json + + echo "::group::CI configuration" + python3 -m json.tool ${{ runner.temp }}/ci_run_data.json + echo "::endgroup::" + + { + echo 'CI_DATA<> "$GITHUB_OUTPUT" + - name: Re-create GH statuses for skipped jobs if any + run: | + python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ runner.temp }}/ci_run_data.json --update-gh-statuses + BuildDockers: + needs: [RunConfig] + if: ${{ !failure() && !cancelled() && toJson(fromJson(needs.RunConfig.outputs.data).docker_data.missing_multi) != '[]' }} + uses: ./.github/workflows/docker_test_images.yml + with: + data: ${{ needs.RunConfig.outputs.data }} + StyleCheck: + needs: [RunConfig, BuildDockers] + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Style check')}} + uses: ./.github/workflows/reusable_test.yml + with: + test_name: Style check + runner_type: style-checker-aarch64 + run_command: | + python3 style_check.py + data: ${{ needs.RunConfig.outputs.data }} + secrets: + secret_envs: | + ROBOT_CLICKHOUSE_SSH_KEY< "$WORKFLOW_RESULT_FILE" << 'EOF' + ${{ toJson(needs) }} + EOF + python3 merge_pr.py --set-ci-status + - name: Check Workflow results + uses: ./.github/actions/check_workflow + with: + needs: ${{ toJson(needs) }} + + ################################# Stage Final ################################# + # + FinishCheck: + if: ${{ !failure() && !cancelled() }} + needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2_ww, Tests_2] + runs-on: [self-hosted, style-checker-aarch64] + steps: + - name: Check out repository code + uses: ClickHouse/checkout@v1 + with: + filter: tree:0 + - name: Finish label + run: | + cd 
"$GITHUB_WORKSPACE/tests/ci" + python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }} + +############################################################################################# +###################################### JEPSEN TESTS ######################################### +############################################################################################# + # This is special test NOT INCLUDED in FinishCheck + # When it's skipped, all dependent tasks will be skipped too. + # DO NOT add it there + Jepsen: + # we need concurrency as the job uses dedicated instances in the cloud + concurrency: + group: jepsen + if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }} + needs: [RunConfig, Builds_1] + uses: ./.github/workflows/reusable_test.yml + with: + test_name: ClickHouse Keeper Jepsen + runner_type: style-checker-aarch64 + data: ${{ needs.RunConfig.outputs.data }} diff --git a/ci/docker/stateful-test/Dockerfile b/ci/docker/stateful-test/Dockerfile new file mode 100644 index 00000000000..e21aec4a48f --- /dev/null +++ b/ci/docker/stateful-test/Dockerfile @@ -0,0 +1,14 @@ +ARG FROM_TAG=latest +FROM clickhouse/stateless-test:$FROM_TAG + +USER root + +RUN apt-get update -y \ + && env DEBIAN_FRONTEND=noninteractive \ + apt-get install --yes --no-install-recommends \ + nodejs \ + npm \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \ + +USER clickhouse diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile index cd02fc27f8e..dcfaa5f6267 100644 --- a/ci/docker/stateless-test/Dockerfile +++ b/ci/docker/stateless-test/Dockerfile @@ -8,10 +8,10 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" -RUN mkdir /etc/clickhouse-server /etc/clickhouse-keeper /etc/clickhouse-server && chmod 777 /etc/clickhouse-* +RUN mkdir /etc/clickhouse-server /etc/clickhouse-keeper /etc/clickhouse-client && chmod 777 /etc/clickhouse-* \ + && mkdir -p /var/lib/clickhouse /var/log/clickhouse-server && chmod 777 /var/log/clickhouse-server /var/lib/clickhouse RUN addgroup --gid 1001 clickhouse && adduser --uid 1001 --gid 1001 --disabled-password clickhouse -USER clickhouse # moreutils - provides ts fo FT # expect, bzip2 - requried by FT @@ -57,6 +57,7 @@ RUN apt-get update -y \ p7zip-full \ curl \ wget \ + xz-utils \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* @@ -112,3 +113,5 @@ RUN curl -L --no-verbose -O 'https://archive.apache.org/dist/hadoop/common/hadoo RUN npm install -g azurite@3.30.0 \ && npm install -g tslib && npm install -g node + +USER clickhouse diff --git a/ci/jobs/functional_stateful_tests.py b/ci/jobs/functional_stateful_tests.py new file mode 100644 index 00000000000..0faba4383ec --- /dev/null +++ b/ci/jobs/functional_stateful_tests.py @@ -0,0 +1,170 @@ +import argparse +import os +import time +from pathlib import Path + +from praktika.result import Result +from praktika.settings import Settings +from praktika.utils import MetaClasses, Shell, Utils + +from ci.jobs.scripts.clickhouse_proc import ClickHouseProc +from ci.jobs.scripts.functional_tests_results import FTResultsProcessor + + +class JobStages(metaclass=MetaClasses.WithIter): + INSTALL_CLICKHOUSE = "install" + START = "start" + TEST = "test" + + 
+def parse_args():
+    parser = argparse.ArgumentParser(description="ClickHouse Build Job")
+    parser.add_argument(
+        "--ch-path", help="Path to clickhouse binary", default=f"{Settings.INPUT_DIR}"
+    )
+    parser.add_argument(
+        "--test-options",
+        help="Comma separated option(s): parallel|non-parallel|BATCH_NUM/BATCH_TOT|..",
+        default="",
+    )
+    parser.add_argument("--param", help="Optional job start stage", default=None)
+    parser.add_argument("--test", help="Optional test name pattern", default="")
+    return parser.parse_args()
+
+
+def run_test(
+    no_parallel: bool, no_sequiential: bool, batch_num: int, batch_total: int, test=""
+):
+    test_output_file = f"{Settings.OUTPUT_DIR}/test_result.txt"
+
+    test_command = f"clickhouse-test --jobs 2 --testname --shard --zookeeper --check-zookeeper-session --no-stateless \
+        --hung-check --print-time \
+        --capture-client-stacktrace --queries ./tests/queries -- '{test}' \
+        | ts '%Y-%m-%d %H:%M:%S' | tee -a \"{test_output_file}\""
+    if Path(test_output_file).exists():
+        Path(test_output_file).unlink()
+    Shell.run(test_command, verbose=True)
+
+
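For illustration only: the "--test-options" value accepted above is a comma-separated list, which main() below decomposes into a parallel/non-parallel choice and an optional BATCH_NUM/BATCH_TOT pair. A standalone sketch of that decomposition, with concrete worked values (the helper name is illustrative, not part of the patch):

# Illustrative sketch, not part of the patch: how a --test-options string such
# as "amd_debug,parallel,1/3" maps onto the flags used by main() below.
from typing import Tuple

def parse_test_options(raw: str) -> Tuple[bool, bool, int, int]:
    options = raw.split(",")
    no_parallel = "non-parallel" in options   # skip the parallel suite
    no_sequential = "parallel" in options     # skip the sequential suite
    batch_num, total_batches = 0, 0
    for opt in options:
        if "/" in opt:
            batch_num, total_batches = map(int, opt.split("/"))
    return no_parallel, no_sequential, batch_num, total_batches

assert parse_test_options("amd_debug,parallel,1/3") == (False, True, 1, 3)
assert parse_test_options("non-parallel") == (True, False, 0, 0)
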
+def main():
+
+    args = parse_args()
+    test_options = args.test_options.split(",")
+    no_parallel = "non-parallel" in test_options
+    no_sequential = "parallel" in test_options
+    batch_num, total_batches = 0, 0
+    for to in test_options:
+        if "/" in to:
+            batch_num, total_batches = map(int, to.split("/"))
+
+    # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
+    #     f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
+    #     verbose=True,
+    #     strict=True
+    # )
+
+    ch_path = args.ch_path
+    assert Path(
+        ch_path + "/clickhouse"
+    ).is_file(), f"clickhouse binary not found under [{ch_path}]"
+
+    stop_watch = Utils.Stopwatch()
+
+    stages = list(JobStages)
+
+    logs_to_attach = []
+
+    stage = args.param or JobStages.INSTALL_CLICKHOUSE
+    if stage:
+        assert stage in JobStages, f"--param must be one of [{list(JobStages)}]"
+        print(f"Job will start from stage [{stage}]")
+        while stage in stages:
+            stages.pop(0)
+        stages.insert(0, stage)
+
+    res = True
+    results = []
+
+    Utils.add_to_PATH(f"{ch_path}:tests")
+
+    if res and JobStages.INSTALL_CLICKHOUSE in stages:
+        commands = [
+            f"rm -rf /tmp/praktika/var/log/clickhouse-server/clickhouse-server.*",
+            f"chmod +x {ch_path}/clickhouse",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
+            f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
+            f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
+            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage",
+            # clickhouse benchmark segfaults with --config-path, so provide client config by its default location
+            f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
+            # update_path_ch_config,
+            # f"sed -i 's|>/var/|>{Settings.TEMP_DIR}/var/|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.xml",
+            # f"sed -i 's|>/etc/|>{Settings.TEMP_DIR}/etc/|g' {Settings.TEMP_DIR}/etc/clickhouse-server/config.d/ssl_certs.xml",
+            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
+            f"for file in /tmp/praktika/etc/clickhouse-server/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|>/var/log|>{Settings.TEMP_DIR}/var/log|g; s|>/etc/|>{Settings.TEMP_DIR}/etc/|g' $(readlink -f $file); done",
+            f"for file in /tmp/praktika/etc/clickhouse-server/config.d/*.xml; do [ -f $file ] && echo Change config $file && sed -i 's|local_disk|{Settings.TEMP_DIR}/local_disk|g' $(readlink -f $file); done",
+            f"clickhouse-server --version",
+        ]
+        results.append(
+            Result.create_from_command_execution(
+                name="Install ClickHouse", command=commands, with_log=True
+            )
+        )
+        res = results[-1].is_ok()
+
+    CH = ClickHouseProc()
+    if res and JobStages.START in stages:
+        stop_watch_ = Utils.Stopwatch()
+        step_name = "Start ClickHouse Server"
+        print(step_name)
+        minio_log = "/tmp/praktika/output/minio.log"
+        res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
+        logs_to_attach += [minio_log]
+        time.sleep(10)
+        Shell.check("ps -ef | grep minio", verbose=True)
+        res = res and Shell.check(
+            "aws s3 ls s3://test --endpoint-url http://localhost:11111/", verbose=True
+        )
+        res = res and CH.start()
+        res = res and CH.wait_ready()
+        if res:
+            print("ch started")
+        logs_to_attach += [
+            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.log",
+            "/tmp/praktika/var/log/clickhouse-server/clickhouse-server.err.log",
+        ]
+        results.append(
+            Result.create_from(
+                name=step_name,
+                status=res,
+                stopwatch=stop_watch_,
+            )
+        )
+        res = results[-1].is_ok()
+
+    if res and JobStages.TEST in stages:
+        stop_watch_ = Utils.Stopwatch()
+        step_name = "Tests"
+        print(step_name)
+        # assert Shell.check("clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"", verbose=True)
+        run_test(
+            no_parallel=no_parallel,
+            no_sequiential=no_sequential,
+            batch_num=batch_num,
+            batch_total=total_batches,
+            test=args.test,
+        )
+        results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
+        results[-1].set_timing(stopwatch=stop_watch_)
+        res = results[-1].is_ok()
+
+    Result.create_from(
+        results=results, stopwatch=stop_watch, files=logs_to_attach if not res else []
+    ).complete_job()
+
+
+if __name__ == "__main__":
+    main()
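For illustration only: the START stage above verifies MinIO by shelling out to the aws CLI ("aws s3 ls s3://test --endpoint-url http://localhost:11111/"), with endpoint, bucket and credentials coming from the setup_minio.sh defaults further below (MINIO_ROOT_USER/MINIO_ROOT_PASSWORD default to "clickhouse"). A rough Python equivalent of that readiness check is sketched here; boto3 is an assumption of the sketch, not something this patch installs or uses.

# Illustrative sketch, not part of the patch: readiness check against the CI
# MinIO instance, equivalent in spirit to the `aws s3 ls` call above.
import boto3

def minio_bucket_ready(bucket: str = "test") -> bool:
    s3 = boto3.client(
        "s3",
        endpoint_url="http://localhost:11111",
        aws_access_key_id="clickhouse",       # MINIO_ROOT_USER default
        aws_secret_access_key="clickhouse",   # MINIO_ROOT_PASSWORD default
    )
    try:
        s3.head_bucket(Bucket=bucket)  # raises if the bucket is missing/unreachable
        return True
    except Exception:
        return False

if __name__ == "__main__":
    print("minio ready:", minio_bucket_ready())
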
diff --git a/ci/jobs/functional_stateless_tests.py b/ci/jobs/functional_stateless_tests.py
index c2d374aa51b..0d73312bd9e 100644
--- a/ci/jobs/functional_stateless_tests.py
+++ b/ci/jobs/functional_stateless_tests.py
@@ -99,9 +99,12 @@ def main():
             f"chmod +x {ch_path}/clickhouse",
             f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-server",
             f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-client",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-compressor",
+            f"ln -sf {ch_path}/clickhouse {ch_path}/clickhouse-local",
             f"rm -rf {Settings.TEMP_DIR}/etc/ && mkdir -p {Settings.TEMP_DIR}/etc/clickhouse-client {Settings.TEMP_DIR}/etc/clickhouse-server",
             f"cp programs/server/config.xml programs/server/users.xml {Settings.TEMP_DIR}/etc/clickhouse-server/",
-            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage",
+            # TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled
+            f"./tests/config/install.sh {Settings.TEMP_DIR}/etc/clickhouse-server {Settings.TEMP_DIR}/etc/clickhouse-client --s3-storage --no-azure",
             # clickhouse benchmark segfaults with --config-path, so provide client config by its default location
             f"cp {Settings.TEMP_DIR}/etc/clickhouse-client/* /etc/clickhouse-client/",
             # update_path_ch_config,
@@ -127,7 +130,7 @@ def main():
         hdfs_log = "/tmp/praktika/output/hdfs_mini.log"
         minio_log = "/tmp/praktika/output/minio.log"
         res = res and CH.start_hdfs(log_file_path=hdfs_log)
-        res = res and CH.start_minio(log_file_path=minio_log)
+        res = res and CH.start_minio(test_type="stateful", log_file_path=minio_log)
         logs_to_attach += [minio_log, hdfs_log]
         time.sleep(10)
         Shell.check("ps -ef | grep minio", verbose=True)
@@ -156,6 +159,10 @@ def main():
         stop_watch_ = Utils.Stopwatch()
         step_name = "Tests"
         print(step_name)
+        assert Shell.check(
+            "clickhouse-client -q \"insert into system.zookeeper (name, path, value) values ('auxiliary_zookeeper2', '/test/chroot/', '')\"",
+            verbose=True,
+        )
         run_stateless_test(
             no_parallel=no_parallel,
             no_sequiential=no_sequential,
diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py
index 8f9bef57083..6108563605f 100644
--- a/ci/jobs/scripts/clickhouse_proc.py
+++ b/ci/jobs/scripts/clickhouse_proc.py
@@ -31,12 +31,10 @@ class ClickHouseProc:
         self.info = ""
         self.info_file = ""
 
-        self.minio_cmd = f"tests/docker_scripts/setup_minio.sh stateless 2>&1 > {Settings.OUTPUT_DIR}/minio.log"
-
         Utils.set_env("CLICKHOUSE_CONFIG_DIR", self.ch_config_dir)
         Utils.set_env("CLICKHOUSE_CONFIG", self.config_file)
         Utils.set_env("CLICKHOUSE_USER_FILES", self.user_files_path)
-        Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
+        # Utils.set_env("CLICKHOUSE_SCHEMA_FILES", f"{self.ch_config_dir}/format_schemas")
 
         # if not fast_test:
         #     with open(f"{self.ch_config_dir}/config.d/backups.xml", "w") as file:
@@ -55,8 +53,12 @@ class ClickHouseProc:
         )
         return True
 
-    def start_minio(self, log_file_path):
-        command = ["tests/docker_scripts/setup_minio.sh", "stateless", "./tests"]
+    def start_minio(self, test_type, log_file_path):
+        command = [
+            "./ci/jobs/scripts/functional_tests/setup_minio.sh",
+            test_type,
+            "./tests",
+        ]
         with open(log_file_path, "w") as log_file:
             process = subprocess.Popen(
                 command, stdout=log_file, stderr=subprocess.STDOUT
diff --git a/ci/jobs/scripts/functional_tests/setup_minio.sh b/ci/jobs/scripts/functional_tests/setup_minio.sh
new file mode 100755
index 00000000000..88839c39674
--- /dev/null
+++ b/ci/jobs/scripts/functional_tests/setup_minio.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+set -euxf -o pipefail
+
+export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
+export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
+TEST_DIR=${2:-/repo/tests/}
+
+if [ -d "$TEMP_DIR" ]; then
+    TEST_DIR=$(readlink -f $TEST_DIR)
+    cd "$TEMP_DIR"
+    # add / for minio mc in docker
+    PATH="/:.:$PATH"
+fi
+
+usage() {
+    echo $"Usage: $0 (default path: /usr/share/clickhouse-test)"
+    exit 1
+}
+
+check_arg() {
+    local query_dir
+    if [ ! $# -eq 1 ]; then
+        if [ !
$# -eq 2 ]; then + echo "ERROR: need either one or two arguments, (default path: /usr/share/clickhouse-test)" + usage + fi + fi + case "$1" in + stateless) + query_dir="0_stateless" + ;; + stateful) + query_dir="1_stateful" + ;; + *) + echo "unknown test type ${test_type}" + usage + ;; + esac + echo ${query_dir} +} + +find_arch() { + local arch + case $(uname -m) in + x86_64) + arch="amd64" + ;; + aarch64) + arch="arm64" + ;; + *) + echo "unknown architecture $(uname -m)"; + exit 1 + ;; + esac + echo ${arch} +} + +find_os() { + local os + os=$(uname -s | tr '[:upper:]' '[:lower:]') + echo "${os}" +} + +download_minio() { + local os + local arch + local minio_server_version=${MINIO_SERVER_VERSION:-2024-08-03T04-33-23Z} + local minio_client_version=${MINIO_CLIENT_VERSION:-2024-07-31T15-58-33Z} + + os=$(find_os) + arch=$(find_arch) + wget "https://dl.min.io/server/minio/release/${os}-${arch}/archive/minio.RELEASE.${minio_server_version}" -O ./minio + wget "https://dl.min.io/client/mc/release/${os}-${arch}/archive/mc.RELEASE.${minio_client_version}" -O ./mc + chmod +x ./mc ./minio +} + +start_minio() { + pwd + mkdir -p ./minio_data + minio --version + nohup minio server --address ":11111" ./minio_data & + wait_for_it + lsof -i :11111 + sleep 5 +} + +setup_minio() { + local test_type=$1 + echo "setup_minio(), test_type=$test_type" + mc alias set clickminio http://localhost:11111 clickhouse clickhouse + mc admin user add clickminio test testtest + mc admin policy attach clickminio readwrite --user=test ||: + mc mb --ignore-existing clickminio/test + if [ "$test_type" = "stateless" ]; then + echo "Create @test bucket in minio" + mc anonymous set public clickminio/test + fi +} + +# uploads data to minio, by default after unpacking all tests +# will be in /usr/share/clickhouse-test/queries +upload_data() { + local query_dir=$1 + local test_path=$2 + local data_path=${test_path}/queries/${query_dir}/data_minio + echo "upload_data() data_path=$data_path" + + # iterating over globs will cause redundant file variable to be + # a path to a file, not a filename + # shellcheck disable=SC2045 + if [ -d "${data_path}" ]; then + mc cp --recursive "${data_path}"/ clickminio/test/ + fi +} + +setup_aws_credentials() { + local minio_root_user=${MINIO_ROOT_USER:-clickhouse} + local minio_root_password=${MINIO_ROOT_PASSWORD:-clickhouse} + mkdir -p ~/.aws + cat <> ~/.aws/credentials +[default] +aws_access_key_id=${minio_root_user} +aws_secret_access_key=${minio_root_password} +EOT +} + +wait_for_it() { + local counter=0 + local max_counter=60 + local url="http://localhost:11111" + local params=( + --silent + --verbose + ) + while ! curl "${params[@]}" "${url}" 2>&1 | grep AccessDenied + do + if [[ ${counter} == "${max_counter}" ]]; then + echo "failed to setup minio" + exit 0 + fi + echo "trying to connect to minio" + sleep 1 + counter=$((counter + 1)) + done +} + +main() { + local query_dir + query_dir=$(check_arg "$@") + if ! 
(minio --version && mc --version); then + download_minio + fi + start_minio + setup_minio "$1" + upload_data "${query_dir}" "$TEST_DIR" + setup_aws_credentials +} + +main "$@" diff --git a/ci/praktika/json.html b/ci/praktika/json.html index 544fd6e68d4..b11106719cd 100644 --- a/ci/praktika/json.html +++ b/ci/praktika/json.html @@ -103,12 +103,13 @@ #result-container { background-color: var(--tile-background); margin-left: calc(var(--status-width) + 20px); - padding: 20px; + padding: 0; box-sizing: border-box; text-align: center; font-size: 18px; font-weight: normal; flex-grow: 1; + margin-bottom: 40px; } #footer { diff --git a/ci/settings/definitions.py b/ci/settings/definitions.py index 9f529798830..8ebf79231ac 100644 --- a/ci/settings/definitions.py +++ b/ci/settings/definitions.py @@ -133,12 +133,12 @@ DOCKERS = [ platforms=Docker.Platforms.arm_amd, depends_on=[], ), - # Docker.Config( - # name="clickhouse/stateful-test", - # path="./ci/docker/test/stateful", - # platforms=Docker.Platforms.arm_amd, - # depends_on=["clickhouse/stateless-test"], - # ), + Docker.Config( + name="clickhouse/stateful-test", + path="./ci/docker/stateful-test", + platforms=Docker.Platforms.arm_amd, + depends_on=["clickhouse/stateless-test"], + ), # Docker.Config( # name="clickhouse/stress-test", # path="./ci/docker/test/stress", @@ -241,3 +241,4 @@ class JobNames: FAST_TEST = "Fast test" BUILD = "Build" STATELESS = "Stateless tests" + STATEFUL = "Stateful tests" diff --git a/ci/workflows/pull_request.py b/ci/workflows/pull_request.py index 94dcc2ab722..761ab8a6ebc 100644 --- a/ci/workflows/pull_request.py +++ b/ci/workflows/pull_request.py @@ -1,5 +1,3 @@ -from typing import List - from praktika import Artifact, Job, Workflow from praktika.settings import Settings @@ -83,7 +81,7 @@ stateless_tests_jobs = Job.Config( runs_on=[RunnerLabels.BUILDER_AMD], command="python3 ./ci/jobs/functional_stateless_tests.py --test-options {PARAMETER}", # many tests expect to see "/var/lib/clickhouse" in various output lines - add mount for now, consider creating this dir in docker file - run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined+--volume=/tmp/praktika:/var/lib/clickhouse", + run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined", digest_config=Job.CacheDigestConfig( include_paths=[ "./ci/jobs/functional_stateless_tests.py", @@ -116,6 +114,30 @@ stateless_tests_jobs = Job.Config( ], ) +stateful_tests_jobs = Job.Config( + name=JobNames.STATEFUL, + runs_on=[RunnerLabels.BUILDER_AMD], + command="python3 ./ci/jobs/functional_stateful_tests.py --test-options {PARAMETER}", + # many tests expect to see "/var/lib/clickhouse" + # some tests expect to see "/var/log/clickhouse" + run_in_docker="clickhouse/stateless-test+--security-opt seccomp=unconfined", + digest_config=Job.CacheDigestConfig( + include_paths=[ + "./ci/jobs/functional_stateful_tests.py", + ], + ), +).parametrize( + parameter=[ + "amd_debug,parallel", + ], + runs_on=[ + [RunnerLabels.BUILDER_AMD], + ], + requires=[ + [ArtifactNames.CH_AMD_DEBUG], + ], +) + workflow = Workflow.Config( name="PR", event=Workflow.Event.PULL_REQUEST, @@ -125,6 +147,7 @@ workflow = Workflow.Config( fast_test_job, *build_jobs, *stateless_tests_jobs, + *stateful_tests_jobs, ], artifacts=[ Artifact.Config( @@ -157,7 +180,7 @@ workflow = Workflow.Config( WORKFLOWS = [ workflow, -] # type: List[Workflow.Config] +] # if __name__ == "__main__": diff --git a/tests/config/config.d/ssl_certs.xml b/tests/config/config.d/ssl_certs.xml index 
c20fef89e00..26b679f39df 100644 --- a/tests/config/config.d/ssl_certs.xml +++ b/tests/config/config.d/ssl_certs.xml @@ -1,8 +1,8 @@ - /tmp/praktika/etc/clickhouse-server/server.crt - /tmp/praktika/etc/clickhouse-server/server.key + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key diff --git a/tests/config/install.sh b/tests/config/install.sh index 9630977b9c1..b9d8092c74f 100755 --- a/tests/config/install.sh +++ b/tests/config/install.sh @@ -13,12 +13,13 @@ shift # DEST_SERVER_PATH shift # DEST_CLIENT_PATH FAST_TEST=0 -S3_STORAGE=0 +NO_AZURE=0 while [[ "$#" -gt 0 ]]; do case $1 in --fast-test) FAST_TEST=1 ;; - --s3-storage) S3_STORAGE=1 ;; + --s3-storage) EXPORT_S3_STORAGE_POLICIES=1 ;; + --no-azure) NO_AZURE=1 ;; *) echo "Unknown option: $1" ; exit 1 ;; esac shift @@ -199,8 +200,10 @@ elif [[ "$USE_AZURE_STORAGE_FOR_MERGE_TREE" == "1" ]]; then ln -sf $SRC_PATH/config.d/azure_storage_policy_by_default.xml $DEST_SERVER_PATH/config.d/ fi -if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]] || [[ "$S3_STORAGE" = "1" ]]; then - #ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ +if [[ "$EXPORT_S3_STORAGE_POLICIES" == "1" ]]; then + if [[ "$NO_AZURE" != "1" ]]; then + ln -sf $SRC_PATH/config.d/azure_storage_conf.xml $DEST_SERVER_PATH/config.d/ + fi ln -sf $SRC_PATH/config.d/storage_conf.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02944.xml $DEST_SERVER_PATH/config.d/ ln -sf $SRC_PATH/config.d/storage_conf_02963.xml $DEST_SERVER_PATH/config.d/ diff --git a/tests/docker_scripts/setup_minio.sh b/tests/docker_scripts/setup_minio.sh index 88839c39674..40e93e713a1 100755 --- a/tests/docker_scripts/setup_minio.sh +++ b/tests/docker_scripts/setup_minio.sh @@ -4,14 +4,6 @@ set -euxf -o pipefail export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} -TEST_DIR=${2:-/repo/tests/} - -if [ -d "$TEMP_DIR" ]; then - TEST_DIR=$(readlink -f $TEST_DIR) - cd "$TEMP_DIR" - # add / for minio mc in docker - PATH="/:.:$PATH" -fi usage() { echo $"Usage: $0 (default path: /usr/share/clickhouse-test)" @@ -78,10 +70,9 @@ download_minio() { } start_minio() { - pwd mkdir -p ./minio_data - minio --version - nohup minio server --address ":11111" ./minio_data & + ./minio --version + ./minio server --address ":11111" ./minio_data & wait_for_it lsof -i :11111 sleep 5 @@ -89,14 +80,12 @@ start_minio() { setup_minio() { local test_type=$1 - echo "setup_minio(), test_type=$test_type" - mc alias set clickminio http://localhost:11111 clickhouse clickhouse - mc admin user add clickminio test testtest - mc admin policy attach clickminio readwrite --user=test ||: - mc mb --ignore-existing clickminio/test + ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse + ./mc admin user add clickminio test testtest + ./mc admin policy attach clickminio readwrite --user=test + ./mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - echo "Create @test bucket in minio" - mc anonymous set public clickminio/test + ./mc anonymous set public clickminio/test fi } @@ -106,13 +95,12 @@ upload_data() { local query_dir=$1 local test_path=$2 local data_path=${test_path}/queries/${query_dir}/data_minio - echo "upload_data() data_path=$data_path" # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 if [ -d "${data_path}" ]; then - mc cp --recursive "${data_path}"/ clickminio/test/ + ./mc cp --recursive 
"${data_path}"/ clickminio/test/ fi } @@ -150,12 +138,12 @@ wait_for_it() { main() { local query_dir query_dir=$(check_arg "$@") - if ! (minio --version && mc --version); then + if [ ! -f ./minio ]; then download_minio fi start_minio setup_minio "$1" - upload_data "${query_dir}" "$TEST_DIR" + upload_data "${query_dir}" "${2:-/repo/tests/}" setup_aws_credentials } From 13abc806a20b54db36db998561a206ef2c813b4d Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 14 Nov 2024 20:35:33 +0100 Subject: [PATCH 68/74] Don't randomise settings in 02354_distributed_with_external_aggregation_memory_usage maybe it will help, i'm not sure --- ...02354_distributed_with_external_aggregation_memory_usage.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql index f9da5b3a73c..c9f3b2be8c6 100644 --- a/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql +++ b/tests/queries/0_stateless/02354_distributed_with_external_aggregation_memory_usage.sql @@ -1,4 +1,4 @@ --- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage +-- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-object-storage, no-random-merge-tree-settings, no-random-settings SET max_rows_to_read = '101M'; From 8d99fb9e7c9667ee68aef6d0a7619e4f125a1052 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Thu, 14 Nov 2024 20:51:18 +0100 Subject: [PATCH 69/74] Revert "Revert "Remove ridiculous code bloat"" --- .../AggregateFunctionDeltaSumTimestamp.cpp | 68 ++++++++++++++---- src/AggregateFunctions/Helpers.h | 70 +------------------ 2 files changed, 57 insertions(+), 81 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp index 5819c533fd9..ad1fecac784 100644 --- a/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp +++ b/src/AggregateFunctions/AggregateFunctionDeltaSumTimestamp.cpp @@ -22,6 +22,13 @@ namespace ErrorCodes namespace { +/** Due to a lack of proper code review, this code was contributed with a multiplication of template instantiations + * over all pairs of data types, and we deeply regret that. + * + * We cannot remove all combinations, because the binary representation of serialized data has to remain the same, + * but we can partially heal the wound by treating unsigned and signed data types in the same way. 
+ */ + template struct AggregationFunctionDeltaSumTimestampData { @@ -37,23 +44,22 @@ template class AggregationFunctionDeltaSumTimestamp final : public IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - > + AggregationFunctionDeltaSumTimestamp> { public: AggregationFunctionDeltaSumTimestamp(const DataTypes & arguments, const Array & params) : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - >{arguments, params, createResultType()} - {} + AggregationFunctionDeltaSumTimestamp>{arguments, params, createResultType()} + { + } AggregationFunctionDeltaSumTimestamp() : IAggregateFunctionDataHelper< AggregationFunctionDeltaSumTimestampData, - AggregationFunctionDeltaSumTimestamp - >{} - {} + AggregationFunctionDeltaSumTimestamp>{} + { + } bool allocatesMemoryInArena() const override { return false; } @@ -63,8 +69,8 @@ public: void NO_SANITIZE_UNDEFINED ALWAYS_INLINE add(AggregateDataPtr __restrict place, const IColumn ** columns, size_t row_num, Arena *) const override { - auto value = assert_cast &>(*columns[0]).getData()[row_num]; - auto ts = assert_cast &>(*columns[1]).getData()[row_num]; + auto value = unalignedLoad(columns[0]->getRawData().data() + row_num * sizeof(ValueType)); + auto ts = unalignedLoad(columns[1]->getRawData().data() + row_num * sizeof(TimestampType)); auto & data = this->data(place); @@ -172,10 +178,48 @@ public: void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override { - assert_cast &>(to).getData().push_back(this->data(place).sum); + static_cast(to).template insertRawData( + reinterpret_cast(&this->data(place).sum)); } }; + +template class AggregateFunctionTemplate, typename... TArgs> +IAggregateFunction * createWithTwoTypesSecond(const IDataType & second_type, TArgs && ... args) +{ + WhichDataType which(second_type); + + if (which.idx == TypeIndex::UInt32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::UInt64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Int32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Int64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Float32) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Float64) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::Date) return new AggregateFunctionTemplate(args...); + if (which.idx == TypeIndex::DateTime) return new AggregateFunctionTemplate(args...); + + return nullptr; +} + +template