From a4d3732091b2b8d85c997cd030032978a845f4b4 Mon Sep 17 00:00:00 2001 From: turbo jason Date: Fri, 19 Mar 2021 20:51:29 +0800 Subject: [PATCH 1/5] [ClickHouse][Copier] Improve copier work --- programs/copier/ClusterCopier.cpp | 170 ++++++++++++++++++++---------- programs/copier/ClusterCopier.h | 9 +- programs/copier/TaskCluster.h | 1 + 3 files changed, 124 insertions(+), 56 deletions(-) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 7eea23160b2..5014f75ae48 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -600,10 +600,14 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t Settings settings_push = task_cluster->settings_push; - /// It is important, ALTER ATTACH PARTITION must be done synchronously - /// And we will execute this ALTER query on each replica of a shard. - /// It is correct, because this query is idempotent. - settings_push.replication_alter_partitions_sync = 2; + Settings settings_push = task_cluster->settings_push; + ClusterExecutionMode execution_mode = ClusterExecutionMode::ON_EACH_NODE; + UInt64 max_successful_executions_per_shard = 0; + if (settings_push.replication_alter_partitions_sync == 1) + { + execution_mode = ClusterExecutionMode::ON_EACH_SHARD; + max_successful_executions_per_shard = 1; + } query_alter_ast_string += " ALTER TABLE " + getQuotedTable(original_table) + ((partition_name == "'all'") ? 
" ATTACH PARTITION ID " : " ATTACH PARTITION ") + partition_name + @@ -613,14 +617,33 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t try { - size_t num_nodes = executeQueryOnCluster( - task_table.cluster_push, - query_alter_ast_string, - settings_push, - PoolMode::GET_MANY, - ClusterExecutionMode::ON_EACH_NODE); + /// Try attach partition on each shard + UInt64 num_nodes = executeQueryOnCluster( + task_table.cluster_push, + query_alter_ast_string, + task_cluster->settings_push, + PoolMode::GET_MANY, + execution_mode, + max_successful_executions_per_shard); - LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); + if (settings_push.replication_alter_partitions_sync == 1) + { + LOG_INFO( + log, + "Destination tables {} have been executed alter query successfuly on {} shards of {}", + getQuotedTable(task_table.table_push), + num_nodes, + task_table.cluster_push->getShardCount()); + + if (num_nodes != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } + } + else + { + LOG_INFO(log, "Number of nodes that executed ALTER query successfully : {}", toString(num_nodes)); + } } catch (...) 
{ @@ -856,6 +879,16 @@ bool ClusterCopier::tryDropPartitionPiece( bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) { + //first create destionation table + TaskStatus task_status = TaskStatus::Error; + + task_status = tryCreateDestinationTable(timeouts, task_table); + /// Exit if success + if (task_status != TaskStatus::Finished) + { + LOG_WARNING(log, "Create destination table failed"); + return false; + } /// An heuristic: if previous shard is already done, then check next one without sleeps due to max_workers constraint bool previous_shard_is_instantly_finished = false; @@ -932,7 +965,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab /// Do not sleep if there is a sequence of already processed shards to increase startup bool is_unprioritized_task = !previous_shard_is_instantly_finished && shard->priority.is_remote; - TaskStatus task_status = TaskStatus::Error; + task_status = TaskStatus::Error; bool was_error = false; has_shard_to_process = true; for (UInt64 try_num = 0; try_num < max_shard_partition_tries; ++try_num) @@ -1050,6 +1083,44 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab return table_is_done; } +TaskStatus ClusterCopier::tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) +{ + /// Try create original table (if not exists) on each shard + + //TaskTable & task_table = task_shard.task_table; + const TaskShardPtr task_shard = task_table.all_shards.at(0); + /// We need to update table definitions for each part, it could be changed after ALTER + task_shard->current_pull_table_create_query = getCreateTableForPullShard(timeouts, *task_shard); + try + { + auto create_query_push_ast + = rewriteCreateQueryStorage(task_shard->current_pull_table_create_query, task_table.table_push, task_table.engine_push_ast); + auto & create = create_query_push_ast->as(); + create.if_not_exists = true; + 
InterpreterCreateQuery::prepareOnClusterQuery(create, context, task_table.cluster_push_name); + String query = queryToString(create_query_push_ast); + + LOG_DEBUG(log, "Create destination tables. Query: {}", query); + UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); + LOG_INFO( + log, + "Destination tables {} have been created on {} shards of {}", + getQuotedTable(task_table.table_push), + shards, + task_table.cluster_push->getShardCount()); + if (shards != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } + } + catch (...) + { + tryLogCurrentException(log, "Error while creating original table. Maybe we are not first."); + } + + return TaskStatus::Finished; +} + /// Job for copying partition from particular shard. TaskStatus ClusterCopier::tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, bool is_unprioritized_task) { @@ -1366,8 +1437,17 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( LOG_DEBUG(log, "Create destination tables. 
Query: {}", query); UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); - LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", - getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount()); + LOG_INFO( + log, + "Destination tables {} have been created on {} shards of {}", + getQuotedTable(task_table.table_push), + shards, + task_table.cluster_push->getShardCount()); + + if (shards != task_table.cluster_push->getShardCount()) + { + return TaskStatus::Error; + } } /// Do the copying @@ -1471,32 +1551,13 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( { tryLogCurrentException(log, "An error occurred during copying, partition will be marked as dirty"); create_is_dirty_node(clean_state_clock); + dropHelpingTablesByPieceNumber(task_table, current_piece_number); return TaskStatus::Error; } } LOG_INFO(log, "Partition {} piece {} copied. But not moved to original destination table.", task_partition.name, toString(current_piece_number)); - - /// Try create original table (if not exists) on each shard - try - { - auto create_query_push_ast = rewriteCreateQueryStorage(task_shard.current_pull_table_create_query, - task_table.table_push, task_table.engine_push_ast); - auto & create = create_query_push_ast->as(); - create.if_not_exists = true; - InterpreterCreateQuery::prepareOnClusterQuery(create, context, task_table.cluster_push_name); - String query = queryToString(create_query_push_ast); - - LOG_DEBUG(log, "Create destination tables. Query: {}", query); - UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, PoolMode::GET_MANY); - LOG_DEBUG(log, "Destination tables {} have been created on {} shards of {}", getQuotedTable(task_table.table_push), shards, task_table.cluster_push->getShardCount()); - } - catch (...) - { - tryLogCurrentException(log, "Error while creating original table. 
Maybe we are not first."); - } - /// Finalize the processing, change state of current partition task (and also check is_dirty flag) { String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id); @@ -1538,33 +1599,36 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na interpreter.execute(); } +void ClusterCopier::dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number) +{ + LOG_DEBUG(log, "Removing helping tables piece {}", current_piece_number); + + DatabaseAndTableName original_table = task_table.table_push; + DatabaseAndTableName helping_table + = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); + + String query = "DROP TABLE IF EXISTS " + getQuotedTable(helping_table); + + const ClusterPtr & cluster_push = task_table.cluster_push; + Settings settings_push = task_cluster->settings_push; + + LOG_DEBUG(log, "Execute distributed DROP TABLE: {}", query); + + /// We have to drop partition_piece on each replica + UInt64 num_nodes = executeQueryOnCluster(cluster_push, query, settings_push, PoolMode::GET_MANY, ClusterExecutionMode::ON_EACH_NODE); + + LOG_INFO(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes)); +} void ClusterCopier::dropHelpingTables(const TaskTable & task_table) { LOG_DEBUG(log, "Removing helping tables"); for (size_t current_piece_number = 0; current_piece_number < task_table.number_of_splits; ++current_piece_number) { - DatabaseAndTableName original_table = task_table.table_push; - DatabaseAndTableName helping_table = DatabaseAndTableName(original_table.first, original_table.second + "_piece_" + toString(current_piece_number)); - - String query = "DROP TABLE IF EXISTS " + getQuotedTable(helping_table); - - const ClusterPtr & cluster_push = task_table.cluster_push; - Settings settings_push = task_cluster->settings_push; - - LOG_DEBUG(log, "Execute distributed DROP TABLE: 
{}", query); - /// We have to drop partition_piece on each replica - UInt64 num_nodes = executeQueryOnCluster( - cluster_push, query, - settings_push, - PoolMode::GET_MANY, - ClusterExecutionMode::ON_EACH_NODE); - - LOG_DEBUG(log, "DROP TABLE query was successfully executed on {} nodes.", toString(num_nodes)); + dropHelpingTablesByPieceNumber(task_table, current_piece_number); } } - void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskTable & task_table, const String & partition_name) { LOG_DEBUG(log, "Try drop partition partition from all helping tables."); @@ -1586,7 +1650,7 @@ void ClusterCopier::dropParticularPartitionPieceFromAllHelpingTables(const TaskT PoolMode::GET_MANY, ClusterExecutionMode::ON_EACH_NODE); - LOG_DEBUG(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes)); + LOG_INFO(log, "DROP PARTITION query was successfully executed on {} nodes.", toString(num_nodes)); } LOG_DEBUG(log, "All helping tables dropped partition {}", partition_name); } diff --git a/programs/copier/ClusterCopier.h b/programs/copier/ClusterCopier.h index 9aff5493cf8..95bb54cf4e1 100644 --- a/programs/copier/ClusterCopier.h +++ b/programs/copier/ClusterCopier.h @@ -123,12 +123,13 @@ protected: bool tryDropPartitionPiece(ShardPartition & task_partition, const size_t current_piece_number, const zkutil::ZooKeeperPtr & zookeeper, const CleanStateClock & clean_state_clock); - static constexpr UInt64 max_table_tries = 1000; - static constexpr UInt64 max_shard_partition_tries = 600; - static constexpr UInt64 max_shard_partition_piece_tries_for_alter = 100; + static constexpr UInt64 max_table_tries = 3; + static constexpr UInt64 max_shard_partition_tries = 3; + static constexpr UInt64 max_shard_partition_piece_tries_for_alter = 3; bool tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); + TaskStatus tryCreateDestinationTable(const ConnectionTimeouts & timeouts, TaskTable & task_table); /// Job for 
copying partition from particular shard. TaskStatus tryProcessPartitionTask(const ConnectionTimeouts & timeouts, ShardPartition & task_partition, @@ -149,6 +150,8 @@ protected: void dropHelpingTables(const TaskTable & task_table); + void dropHelpingTablesByPieceNumber(const TaskTable & task_table, size_t current_piece_number); + /// Is used for usage less disk space. /// After all pieces were successfully moved to original destination /// table we can get rid of partition pieces (partitions in helping tables). diff --git a/programs/copier/TaskCluster.h b/programs/copier/TaskCluster.h index 5b28f461dd8..1a50597d07f 100644 --- a/programs/copier/TaskCluster.h +++ b/programs/copier/TaskCluster.h @@ -98,6 +98,7 @@ inline void DB::TaskCluster::reloadSettings(const Poco::Util::AbstractConfigurat set_default_value(settings_pull.max_block_size, 8192UL); set_default_value(settings_pull.preferred_block_size_bytes, 0); set_default_value(settings_push.insert_distributed_timeout, 0); + set_default_value(settings_push.replication_alter_partitions_sync, 2); } } From 1b1d425e426e0a88ff3911b9946e8725909ab595 Mon Sep 17 00:00:00 2001 From: turbo jason Date: Mon, 22 Mar 2021 10:27:32 +0800 Subject: [PATCH 2/5] remove duplicated code --- programs/copier/ClusterCopier.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 5014f75ae48..a11c47e4e66 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -598,8 +598,6 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t original_table.second + "_piece_" + toString(current_piece_number)); - Settings settings_push = task_cluster->settings_push; - Settings settings_push = task_cluster->settings_push; ClusterExecutionMode execution_mode = ClusterExecutionMode::ON_EACH_NODE; UInt64 max_successful_executions_per_shard = 0; From 6aa89d0ee36e2720f95f60eaa24bde37cc812e8c Mon Sep 17 00:00:00 2001 From: turbo 
jason Date: Tue, 23 Mar 2021 11:00:06 +0800 Subject: [PATCH 3/5] remove duplicated code --- programs/copier/ClusterCopier.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index a11c47e4e66..2d7765f372e 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -1549,7 +1549,6 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( { tryLogCurrentException(log, "An error occurred during copying, partition will be marked as dirty"); create_is_dirty_node(clean_state_clock); - dropHelpingTablesByPieceNumber(task_table, current_piece_number); return TaskStatus::Error; } } From f4be05ca25ff51c9eb74bef53f7323e288d6b1b7 Mon Sep 17 00:00:00 2001 From: turbo jason Date: Tue, 23 Mar 2021 15:04:25 +0800 Subject: [PATCH 4/5] fix code style --- programs/copier/ClusterCopier.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 2d7765f372e..83832c50147 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -877,7 +877,7 @@ bool ClusterCopier::tryDropPartitionPiece( bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTable & task_table) { - //first create destionation table + /// Create destination table TaskStatus task_status = TaskStatus::Error; task_status = tryCreateDestinationTable(timeouts, task_table); From 22c9cb1aa30c12fe4b36c603b5aad49bcbc02772 Mon Sep 17 00:00:00 2001 From: turbo jason Date: Tue, 23 Mar 2021 15:35:15 +0800 Subject: [PATCH 5/5] fix code style --- programs/copier/ClusterCopier.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 83832c50147..cc524b8fc35 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -628,7 +628,7 @@ TaskStatus 
ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t { LOG_INFO( log, - "Destination tables {} have been executed alter query successfuly on {} shards of {}", + "Destination tables {} have been executed alter query successfully on {} shards of {}", getQuotedTable(task_table.table_push), num_nodes, task_table.cluster_push->getShardCount());