From 125e1bd1821af0562a65f5ea1a0955345165982e Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Wed, 8 Jun 2022 09:22:46 +0000
Subject: [PATCH 01/74] Initial version for linearizable reads

---
 src/Coordination/KeeperDispatcher.cpp   | 96 ++++++++++++++++++++++++-
 src/Coordination/KeeperDispatcher.h     |  5 ++
 src/Coordination/KeeperServer.cpp       | 29 +++++++-
 src/Coordination/KeeperServer.h         | 29 +++++++-
 src/Coordination/KeeperStateMachine.cpp |  5 +-
 src/Coordination/KeeperStateMachine.h   |  7 +-
 6 files changed, 165 insertions(+), 6 deletions(-)

diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index 9ad5fe9e8ed..299768e170d 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -244,6 +244,40 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
     request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
     request_info.session_id = session_id;
 
+    if (request->isReadRequest())
+    {
+        auto leader_info_result = server->getLeaderInfo();
+
+        auto & leader_info_ctx = leader_info_result->get();
+
+        /// If we get some errors, than send them to clients
+        if (!leader_info_result->get_accepted() || leader_info_result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
+        {
+            addErrorResponses({ request_info }, Coordination::Error::ZOPERATIONTIMEOUT);
+            return false;
+        }
+        else if (leader_info_result->get_result_code() != nuraft::cmd_result_code::OK)
+        {
+            addErrorResponses({ request_info }, Coordination::Error::ZCONNECTIONLOSS);
+            return false;
+        }
+
+        KeeperServer::LeaderInfo leader_info;
+        leader_info.term = leader_info_ctx->get_ulong();
+        leader_info.last_committed_index = leader_info_ctx->get_ulong();
+        LOG_INFO(log, "Leader is {}, idx {}", leader_info.term, leader_info.last_committed_index);
+
+        auto current_status = server->getCurrentState();
+        LOG_INFO(log, "Current is {}, idx {}", current_status.term, current_status.last_committed_index);
+
+        if (current_status.term < leader_info.term || current_status.last_committed_index < leader_info.last_committed_index)
+        {
+            std::lock_guard lock(leader_waiter_mutex);
+            leader_waiters[leader_info].push_back(std::move(request_info));
+            return true;
+        }
+    }
+
     std::lock_guard lock(push_request_mutex);
 
     if (shutdown_called)
@@ -273,7 +307,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
     responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
     snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });
 
-    server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue);
+    server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, [this](KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx) { onRequestCommit(request_for_session, log_idx); });
 
     try
     {
@@ -590,6 +624,66 @@ void KeeperDispatcher::updateConfigurationThread()
     }
 }
 
+<<<<<<< HEAD
+=======
+void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx)
+{
+    const auto committed_zxid = request_for_session.zxid;
+
+    const auto process_requests = [this](auto & request_queue)
+    {
+        for (auto & request_info : request_queue)
+        {
+            std::lock_guard lock(push_request_mutex);
+
+            if (shutdown_called)
+                return;
+
+            /// Put close requests without timeouts
+            if (request_info.request->getOpNum() == Coordination::OpNum::Close)
+            {
+                if (!requests_queue->push(std::move(request_info)))
+                    throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR);
+            }
+            else if (!requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds()))
+            {
+                throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
+            }
+        }
+
+        request_queue.clear();
+    };
+
+    {
+        std::lock_guard lock(session_waiter_mutex);
+
+        for (auto it = session_waiters.begin(); it != session_waiters.end();)
+        {
+            auto & [session_id, session_waiter] = *it;
+
+            if (session_waiter.wait_for_zxid <= committed_zxid)
+            {
+                process_requests(session_waiter.request_queue);
+                it = session_waiters.erase(it);
+            }
+            else
+                ++it;
+        }
+    }
+
+    {
+        std::lock_guard lock(leader_waiter_mutex);
+        auto current_status = server->getCurrentState();
+        LOG_INFO(log, "Got term {}, idx {}", current_status.term, log_idx);
+        auto request_queue_it = leader_waiters.find(KeeperServer::LeaderInfo{.term = current_status.term, .last_committed_index = log_idx});
+        if (request_queue_it != leader_waiters.end())
+        {
+            process_requests(request_queue_it->second);
+            leader_waiters.erase(request_queue_it);
+        }
+    }
+}
+
+>>>>>>> Initial version for linearizable reads
 bool KeeperDispatcher::isServerActive() const
 {
     return checkInit() && hasLeader() && !server->isRecovering();
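The putRequest hunk above is the heart of the feature and is essentially Raft's ReadIndex protocol: before a read is served from local state, the dispatcher asks the leader for its current term and last committed log index. If the local replica has already applied everything up to that point, the read can be answered locally without losing linearizability; otherwise the request is parked in leader_waiters under the leader's (term, index) pair and replayed once the local state machine reaches it. A condensed sketch of the decision, with illustrative names rather than the real ClickHouse interfaces:

    #include <cstdint>

    struct LogPosition
    {
        uint64_t term;
        uint64_t last_committed_index;
    };

    /// leader: the (term, index) the leader reported when the read arrived.
    /// local: what this replica has applied so far. Serving the read is safe
    /// only once the local state machine has seen every write the leader
    /// had committed at that moment; otherwise a stale value could leak out.
    bool can_serve_read_locally(const LogPosition & leader, const LogPosition & local)
    {
        return local.term >= leader.term
            && local.last_committed_index >= leader.last_committed_index;
    }

Note also the leftover merge-conflict markers around onRequestCommit; they were committed by accident here and are cleaned up in patch 03 below.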
Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); + } + else if (!requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) + { + throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); + } + } + + request_queue.clear(); + }; + { + std::lock_guard lock(session_waiter_mutex); + + for (auto it = session_waiters.begin(); it != session_waiters.end();) + { + auto & [session_id, session_waiter] = *it; + + if (session_waiter.wait_for_zxid <= committed_zxid) + { + process_requests(session_waiter.request_queue); + it = session_waiters.erase(it); + } + else + ++it; + } + } + + { + std::lock_guard lock(leader_waiter_mutex); + auto current_status = server->getCurrentState(); + LOG_INFO(log, "Got term {}, idx {}", current_status.term, log_idx); + auto request_queue_it = leader_waiters.find(KeeperServer::LeaderInfo{.term = current_status.term, .last_committed_index = log_idx}); + if (request_queue_it != leader_waiters.end()) + { + process_requests(request_queue_it->second); + leader_waiters.erase(request_queue_it); + } + } +} + +>>>>>>> Initial version for linearizable reads bool KeeperDispatcher::isServerActive() const { return checkInit() && hasLeader() && !server->isRecovering(); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index b632327a165..9668fb34653 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -77,6 +77,9 @@ private: /// Counter for new session_id requests. std::atomic internal_session_id_counter{0}; + std::unordered_map> leader_waiters; + std::mutex leader_waiter_mutex; + /// Thread put requests to raft void requestThread(); /// Thread put responses for subscribed sessions @@ -116,6 +119,8 @@ public: return server && server->checkInit(); } + void onRequestCommit(KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx); + /// Is server accepting requests, i.e. 
diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp
index d4c188fe8d9..6b48221a0db 100644
--- a/src/Coordination/KeeperServer.cpp
+++ b/src/Coordination/KeeperServer.cpp
@@ -102,7 +102,8 @@ KeeperServer::KeeperServer(
     const KeeperConfigurationAndSettingsPtr & configuration_and_settings_,
     const Poco::Util::AbstractConfiguration & config,
     ResponsesQueue & responses_queue_,
-    SnapshotsQueue & snapshots_queue_)
+    SnapshotsQueue & snapshots_queue_,
+    KeeperStateMachine::CommitCallback commit_callback)
     : server_id(configuration_and_settings_->server_id)
     , coordination_settings(configuration_and_settings_->coordination_settings)
     , state_machine(nuraft::cs_new<KeeperStateMachine>(
@@ -111,7 +112,8 @@ KeeperServer::KeeperServer(
         configuration_and_settings_->snapshot_storage_path,
         coordination_settings,
         checkAndGetSuperdigest(configuration_and_settings_->super_digest),
-        config.getBool("keeper_server.digest_enabled", true)))
+        config.getBool("keeper_server.digest_enabled", true),
+        std::move(commit_callback)))
     , state_manager(nuraft::cs_new<KeeperStateManager>(
         server_id, "keeper_server", configuration_and_settings_->log_storage_path, config, coordination_settings))
     , log(&Poco::Logger::get("KeeperServer"))
@@ -161,6 +163,19 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server
         reconfigure(new_config);
     }
 
+    RaftAppendResult getLeaderInfo()
+    {
+        nuraft::ptr<nuraft::req_msg> req
+            = nuraft::cs_new<nuraft::req_msg>(0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull);
+        auto result = send_msg_to_leader(req);
+
+        if (!result->has_result())
+            result->get();
+
+        return result;
+    }
+
     using nuraft::raft_server::raft_server;
 
     // peers are initially marked as responding because at least one cycle
@@ -637,6 +652,16 @@ std::vector<int64_t> KeeperServer::getDeadSessions()
     return state_machine->getDeadSessions();
 }
 
+RaftAppendResult KeeperServer::getLeaderInfo()
+{
+    return raft_instance->getLeaderInfo();
+}
+
+KeeperServer::LeaderInfo KeeperServer::getCurrentState()
+{
+    return { .term = raft_instance->get_term(), .last_committed_index = state_machine->last_commit_index() };
+}
+
 ConfigUpdateActions KeeperServer::getConfigurationDiff(const Poco::Util::AbstractConfiguration & config)
 {
     auto diff = state_manager->getConfigurationDiff(config);
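KeeperRaftServer::getLeaderInfo is where a replica actually talks to the leader. It builds a leader_status_request message, a message type that exists in ClickHouse's NuRaft fork (the submodule is bumped to a revision supporting it in patch 02 below, not in vanilla upstream NuRaft), and routes it through send_msg_to_leader. The reply buffer carries the leader's term followed by its committed index, so callers must read the two ulongs back in that order, exactly as putRequest does. A sketch of consuming the round trip, assuming that forked API:

    /// Hypothetical call site; result is a nuraft cmd_result as returned above.
    auto result = server->getLeaderInfo();            // resolves on the leader's reply
    if (result->get_accepted() && result->get_result_code() == nuraft::cmd_result_code::OK)
    {
        auto & buf = result->get();                   // leader's response buffer
        uint64_t leader_term = buf->get_ulong();      // serialized first by the leader
        uint64_t leader_commit_idx = buf->get_ulong(); // serialized second
    }

The blocking result->get() inside getLeaderInfo is deliberate: when the reply has not arrived yet, get() waits for it, so the dispatcher always receives a completed result.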
diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h
index 8c21cf47d94..a18e1a20c5c 100644
--- a/src/Coordination/KeeperServer.h
+++ b/src/Coordination/KeeperServer.h
@@ -65,7 +65,8 @@ public:
         const KeeperConfigurationAndSettingsPtr & settings_,
         const Poco::Util::AbstractConfiguration & config_,
         ResponsesQueue & responses_queue_,
-        SnapshotsQueue & snapshots_queue_);
+        SnapshotsQueue & snapshots_queue_,
+        KeeperStateMachine::CommitCallback commit_callback);
 
     /// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings.
     void startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6 = true);
@@ -111,6 +112,17 @@ public:
 
     int getServerID() const { return server_id; }
 
+    struct LeaderInfo
+    {
+        uint64_t term;
+        uint64_t last_committed_index;
+
+        bool operator==(const LeaderInfo &) const = default;
+    };
+
+    RaftAppendResult getLeaderInfo();
+    LeaderInfo getCurrentState();
+
     /// Get configuration diff between current configuration in RAFT and in XML file
     ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config);
 
@@ -125,3 +137,18 @@ public:
 };
 
 }
+
+namespace std
+{
+    template <>
+    struct hash<DB::KeeperServer::LeaderInfo>
+    {
+        size_t operator()(const DB::KeeperServer::LeaderInfo & info) const
+        {
+            SipHash hash_state;
+            hash_state.update(info.term);
+            hash_state.update(info.last_committed_index);
+            return hash_state.get64();
+        }
+    };
+}
diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp
index 368b23f34d2..db0d0a09e3e 100644
--- a/src/Coordination/KeeperStateMachine.cpp
+++ b/src/Coordination/KeeperStateMachine.cpp
@@ -29,7 +29,8 @@ KeeperStateMachine::KeeperStateMachine(
     const std::string & snapshots_path_,
     const CoordinationSettingsPtr & coordination_settings_,
     const std::string & superdigest_,
-    const bool digest_enabled_)
+    const bool digest_enabled_,
+    CommitCallback commit_callback_)
     : coordination_settings(coordination_settings_)
     , snapshot_manager(
         snapshots_path_,
@@ -44,6 +45,7 @@ KeeperStateMachine::KeeperStateMachine(
     , log(&Poco::Logger::get("KeeperStateMachine"))
     , superdigest(superdigest_)
     , digest_enabled(digest_enabled_)
+    , commit_callback(std::move(commit_callback_))
 {
 }
 
@@ -240,6 +242,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
         }
     }
 
+    commit_callback(request_for_session, log_idx);
     last_committed_idx = log_idx;
     return nullptr;
 }
diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h
index c80b35bb704..cfebc81cbf8 100644
--- a/src/Coordination/KeeperStateMachine.h
+++ b/src/Coordination/KeeperStateMachine.h
@@ -19,13 +19,16 @@ using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
 class KeeperStateMachine : public nuraft::state_machine
 {
 public:
+    using CommitCallback = std::function<void(KeeperStorage::RequestForSession &, uint64_t)>;
+
     KeeperStateMachine(
         ResponsesQueue & responses_queue_,
         SnapshotsQueue & snapshots_queue_,
         const std::string & snapshots_path_,
         const CoordinationSettingsPtr & coordination_settings_,
         const std::string & superdigest_ = "",
-        bool digest_enabled_ = true);
+        bool digest_enabled_ = true,
+        CommitCallback commit_callback_ = [](KeeperStorage::RequestForSession &, uint64_t){});
 
     /// Read state from the latest snapshot
     void init();
@@ -141,6 +144,8 @@ private:
     const std::string superdigest;
 
     const bool digest_enabled;
+
+    const CommitCallback commit_callback;
 };
 
 }
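Two small pieces make the waiter map work. First, the state machine now accepts a CommitCallback and invokes it for every committed entry, which is the hook the dispatcher's onRequestCommit hangs off. Second, LeaderInfo has to be usable as an std::unordered_map key, hence the defaulted operator== and the std::hash specialization built on ClickHouse's SipHash. The same key pattern using only the standard library, for illustration (LogPoint and the hash mixing are stand-ins, not the repository's code):

    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <vector>

    struct LogPoint
    {
        uint64_t term = 0;
        uint64_t last_committed_index = 0;
        bool operator==(const LogPoint &) const = default; // required for bucket lookup
    };

    template <>
    struct std::hash<LogPoint>
    {
        size_t operator()(const LogPoint & p) const noexcept
        {
            // Mix both fields; the real code feeds them into SipHash instead.
            size_t h = std::hash<uint64_t>{}(p.term);
            return h ^ (std::hash<uint64_t>{}(p.last_committed_index)
                        + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2));
        }
    };

    int main()
    {
        std::unordered_map<LogPoint, std::vector<int>> waiters;
        waiters[LogPoint{.term = 4, .last_committed_index = 42}].push_back(1);
        return waiters.count(LogPoint{.term = 4, .last_committed_index = 42}) == 1 ? 0 : 1;
    }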
From fa8bfced1af63ca30963c7614ecb1aca944bfe2a Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Wed, 8 Jun 2022 09:23:58 +0000
Subject: [PATCH 02/74] Update NuRaft

---
 contrib/NuRaft | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/NuRaft b/contrib/NuRaft
index 1334b9ae725..6cf55ad8c34 160000
--- a/contrib/NuRaft
+++ b/contrib/NuRaft
@@ -1 +1 @@
-Subproject commit 1334b9ae72576821a698d657d08838861cf33007
+Subproject commit 6cf55ad8c3426cb8b65e4225baf8a182dd48fc7d

From 9fe06019c0cb9184585307878187d3fec66366e1 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Fri, 17 Jun 2022 18:42:46 +0000
Subject: [PATCH 03/74] Some small fixes

---
 src/Coordination/KeeperDispatcher.cpp | 39 +++++++--------------------
 src/Coordination/KeeperDispatcher.h   |  2 +-
 src/Coordination/KeeperServer.cpp     |  2 +-
 src/Coordination/KeeperServer.h       | 11 ++++----
 4 files changed, 16 insertions(+), 38 deletions(-)

diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index 299768e170d..e9d1c60de1d 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -250,7 +250,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
 
         auto & leader_info_ctx = leader_info_result->get();
 
-        /// If we get some errors, than send them to clients
+        /// If we get some errors, then send them to clients
         if (!leader_info_result->get_accepted() || leader_info_result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
        {
             addErrorResponses({ request_info }, Coordination::Error::ZOPERATIONTIMEOUT);
@@ -262,18 +262,18 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
             return false;
         }
 
-        KeeperServer::LeaderInfo leader_info;
+        KeeperServer::NodeInfo leader_info;
         leader_info.term = leader_info_ctx->get_ulong();
         leader_info.last_committed_index = leader_info_ctx->get_ulong();
         LOG_INFO(log, "Leader is {}, idx {}", leader_info.term, leader_info.last_committed_index);
 
-        auto current_status = server->getCurrentState();
-        LOG_INFO(log, "Current is {}, idx {}", current_status.term, current_status.last_committed_index);
+        auto node_info = server->getNodeInfo();
+        LOG_INFO(log, "Current is {}, idx {}", node_info.term, node_info.last_committed_index);
 
-        if (current_status.term < leader_info.term || current_status.last_committed_index < leader_info.last_committed_index)
+        if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
         {
             std::lock_guard lock(leader_waiter_mutex);
-            leader_waiters[leader_info].push_back(std::move(request_info));
+            leader_waiters[node_info].push_back(std::move(request_info));
             return true;
         }
     }
@@ -624,12 +624,8 @@ void KeeperDispatcher::updateConfigurationThread()
     }
 }
 
-<<<<<<< HEAD
-=======
-void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx)
+void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & /* request_for_session */, uint64_t log_idx)
 {
-    const auto committed_zxid = request_for_session.zxid;
-
     const auto process_requests = [this](auto & request_queue)
     {
         for (auto & request_info : request_queue)
@@ -653,28 +649,12 @@ void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & reques
 
         request_queue.clear();
     };
-    {
-        std::lock_guard lock(session_waiter_mutex);
-
-        for (auto it = session_waiters.begin(); it != session_waiters.end();)
-        {
-            auto & [session_id, session_waiter] = *it;
-
-            if (session_waiter.wait_for_zxid <= committed_zxid)
-            {
-                process_requests(session_waiter.request_queue);
-                it = session_waiters.erase(it);
-            }
-            else
-                ++it;
-        }
-    }
 
     {
         std::lock_guard lock(leader_waiter_mutex);
-        auto current_status = server->getCurrentState();
+        auto current_status = server->getNodeInfo();
         LOG_INFO(log, "Got term {}, idx {}", current_status.term, log_idx);
-        auto request_queue_it = leader_waiters.find(KeeperServer::LeaderInfo{.term = current_status.term, .last_committed_index = log_idx});
+        auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = current_status.term, .last_committed_index = log_idx});
         if (request_queue_it != leader_waiters.end())
         {
             process_requests(request_queue_it->second);
@@ -683,7 +663,6 @@ void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & reques
     }
 }
 
->>>>>>> Initial version for linearizable reads
 bool KeeperDispatcher::isServerActive() const
 {
     return checkInit() && hasLeader() && !server->isRecovering();
diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h
index 9668fb34653..1140457d56b 100644
--- a/src/Coordination/KeeperDispatcher.h
+++ b/src/Coordination/KeeperDispatcher.h
@@ -77,7 +77,7 @@ private:
     /// Counter for new session_id requests.
     std::atomic<int64_t> internal_session_id_counter{0};
 
-    std::unordered_map<KeeperServer::LeaderInfo, KeeperStorage::RequestsForSessions> leader_waiters;
+    std::unordered_map<KeeperServer::NodeInfo, KeeperStorage::RequestsForSessions> leader_waiters;
     std::mutex leader_waiter_mutex;
 
     /// Thread put requests to raft
diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp
index 6b48221a0db..b1b8a0976d2 100644
--- a/src/Coordination/KeeperServer.cpp
+++ b/src/Coordination/KeeperServer.cpp
@@ -657,7 +657,7 @@ RaftAppendResult KeeperServer::getLeaderInfo()
     return raft_instance->getLeaderInfo();
 }
 
-KeeperServer::LeaderInfo KeeperServer::getCurrentState()
+KeeperServer::NodeInfo KeeperServer::getNodeInfo()
 {
     return { .term = raft_instance->get_term(), .last_committed_index = state_machine->last_commit_index() };
 }
diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h
index a18e1a20c5c..472cb67361c 100644
--- a/src/Coordination/KeeperServer.h
+++ b/src/Coordination/KeeperServer.h
@@ -112,16 +112,16 @@ public:
 
     int getServerID() const { return server_id; }
 
-    struct LeaderInfo
+    struct NodeInfo
     {
         uint64_t term;
         uint64_t last_committed_index;
 
-        bool operator==(const LeaderInfo &) const = default;
+        bool operator==(const NodeInfo &) const = default;
     };
 
     RaftAppendResult getLeaderInfo();
-    LeaderInfo getCurrentState();
+    NodeInfo getNodeInfo();
 
     /// Get configuration diff between current configuration in RAFT and in XML file
     ConfigUpdateActions getConfigurationDiff(const Poco::Util::AbstractConfiguration & config);
@@ -130,7 +130,6 @@ public:
     /// Synchronously check for update results with retries.
     void applyConfigurationUpdate(const ConfigUpdateAction & task);
 
-
     /// Wait configuration update for action. Used by followers.
     /// Return true if update was successfully received.
     bool waitConfigurationUpdate(const ConfigUpdateAction & task);
@@ -140,9 +139,9 @@ public:
 namespace std
 {
     template <>
-    struct hash<DB::KeeperServer::LeaderInfo>
+    struct hash<DB::KeeperServer::NodeInfo>
     {
-        size_t operator()(const DB::KeeperServer::LeaderInfo & info) const
+        size_t operator()(const DB::KeeperServer::NodeInfo & info) const
         {
             SipHash hash_state;
             hash_state.update(info.term);
             hash_state.update(info.last_committed_index);

From d2f3d581eb008ee08191b76a7d45e83324695ce2 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Sat, 18 Jun 2022 18:43:02 +0000
Subject: [PATCH 04/74] Use fast lin reads in jepsen

---
 src/Coordination/CoordinationSettings.h        |  1 +
 src/Coordination/KeeperDispatcher.cpp          | 11 ++++-------
 src/Coordination/KeeperDispatcher.h            |  2 +-
 src/Coordination/KeeperServer.cpp              |  1 +
 src/Coordination/KeeperStateMachine.cpp        | 10 +++++-----
 src/Coordination/KeeperStateMachine.h          |  6 +++---
 .../resources/keeper_config.xml                |  3 ++-
 .../src/jepsen/clickhouse_keeper/register.clj  |  3 ++-
 8 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h
index b702b553a03..c1e41f46670 100644
--- a/src/Coordination/CoordinationSettings.h
+++ b/src/Coordination/CoordinationSettings.h
@@ -26,6 +26,7 @@ struct Settings;
     M(Milliseconds, heart_beat_interval_ms, 500, "Heartbeat interval between quorum nodes", 0) \
     M(Milliseconds, election_timeout_lower_bound_ms, 1000, "Lower bound of election timer (avoid too often leader elections)", 0) \
     M(Milliseconds, election_timeout_upper_bound_ms, 2000, "Upper bound of election timer (avoid too often leader elections)", 0) \
+    M(Milliseconds, leadership_expiry, 10000, "How often will leader node check if it still has majority. Set it lower or equal to election_timeout_lower_bound_ms to have linearizable reads.", 0) \
    M(UInt64, reserved_log_items, 100000, "How many log items to store (don't remove during compaction)", 0) \
    M(UInt64, snapshot_distance, 100000, "How many log items we have to collect to write new snapshot", 0) \
    M(Bool, auto_forwarding, true, "Allow to forward write requests from followers to leader", 0) \
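leadership_expiry is what makes the leader's answer trustworthy. The leader replies to leader_status_request from its local state, so a leader cut off from the quorum could keep handing out stale (term, index) pairs. As the setting's description says, keeping leadership_expiry at or below election_timeout_lower_bound_ms means a partitioned leader notices its lost majority and yields before any follower can win a new election; the Jepsen config later in this patch uses 1900 ms against a 2000 ms lower bound. The implied safety condition, as a sketch (this helper does not exist in the repository):

    bool reads_stay_linearizable(uint64_t leadership_expiry_ms, uint64_t election_timeout_lower_bound_ms)
    {
        /// The old leader must expire strictly before a new leader can emerge,
        /// otherwise two nodes could briefly both answer leader-status requests.
        return leadership_expiry_ms > 0 && leadership_expiry_ms <= election_timeout_lower_bound_ms;
    }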
diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index e9d1c60de1d..a7b40e9f128 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -265,15 +265,14 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
         KeeperServer::NodeInfo leader_info;
         leader_info.term = leader_info_ctx->get_ulong();
         leader_info.last_committed_index = leader_info_ctx->get_ulong();
-        LOG_INFO(log, "Leader is {}, idx {}", leader_info.term, leader_info.last_committed_index);
 
         auto node_info = server->getNodeInfo();
-        LOG_INFO(log, "Current is {}, idx {}", node_info.term, node_info.last_committed_index);
 
         if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
         {
             std::lock_guard lock(leader_waiter_mutex);
             leader_waiters[node_info].push_back(std::move(request_info));
+            LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index);
             return true;
         }
     }
@@ -306,7 +305,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
     responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
     snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });
 
-    server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, [this](KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx) { onRequestCommit(request_for_session, log_idx); });
+    server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, [this](uint64_t log_term, uint64_t log_idx) { onRequestCommit(log_term, log_idx); });
 
     try
     {
@@ -622,7 +621,7 @@ void KeeperDispatcher::updateConfigurationThread()
     }
 }
 
-void KeeperDispatcher::onRequestCommit(KeeperStorage::RequestForSession & /* request_for_session */, uint64_t log_idx)
+void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx)
 {
     const auto process_requests = [this](auto & request_queue)
     {
@@ -651,9 +650,7 @@ void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx)
 
     {
         std::lock_guard lock(leader_waiter_mutex);
-        auto current_status = server->getNodeInfo();
-        LOG_INFO(log, "Got term {}, idx {}", current_status.term, log_idx);
-        auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = current_status.term, .last_committed_index = log_idx});
+        auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx});
         if (request_queue_it != leader_waiters.end())
         {
             process_requests(request_queue_it->second);
diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h
index 1140457d56b..1f5de4007f2 100644
--- a/src/Coordination/KeeperDispatcher.h
+++ b/src/Coordination/KeeperDispatcher.h
@@ -119,7 +119,7 @@ public:
         return server && server->checkInit();
     }
 
-    void onRequestCommit(KeeperStorage::RequestForSession & request_for_session, uint64_t log_idx);
+    void onRequestCommit(uint64_t log_term, uint64_t log_idx);
 
     /// Is server accepting requests, i.e. connected to the cluster
     /// and achieved quorum
diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp
index b1b8a0976d2..0a7b1948495 100644
--- a/src/Coordination/KeeperServer.cpp
+++ b/src/Coordination/KeeperServer.cpp
@@ -258,6 +258,7 @@ void KeeperServer::launchRaftServer(bool enable_ipv6)
         coordination_settings->election_timeout_lower_bound_ms.totalMilliseconds(), "election_timeout_lower_bound_ms", log);
     params.election_timeout_upper_bound_ = getValueOrMaxInt32AndLogWarning(
         coordination_settings->election_timeout_upper_bound_ms.totalMilliseconds(), "election_timeout_upper_bound_ms", log);
+    params.leadership_expiry_ = getValueOrMaxInt32AndLogWarning(coordination_settings->leadership_expiry.totalMilliseconds(), "leadership_expiry", log);
     params.reserved_log_items_ = getValueOrMaxInt32AndLogWarning(coordination_settings->reserved_log_items, "reserved_log_items", log);
     params.snapshot_distance_ = getValueOrMaxInt32AndLogWarning(coordination_settings->snapshot_distance, "snapshot_distance", log);
diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp
index db0d0a09e3e..8080f373497 100644
--- a/src/Coordination/KeeperStateMachine.cpp
+++ b/src/Coordination/KeeperStateMachine.cpp
@@ -197,11 +197,11 @@ void KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req
     assertDigest(*request_for_session.digest, storage->getNodesDigest(false), *request_for_session.request, false);
 }
 
-nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, nuraft::buffer & data)
+nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit_ext(const ext_op_params& params)
 {
-    auto request_for_session = parseRequest(data);
+    auto request_for_session = parseRequest(*params.data);
     if (!request_for_session.zxid)
-        request_for_session.zxid = log_idx;
+        request_for_session.zxid = params.log_idx;
 
     /// Special processing of session_id request
     if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
@@ -242,8 +242,8 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit_ext(const ext_op_params&
         }
     }
 
-    commit_callback(request_for_session, log_idx);
-    last_committed_idx = log_idx;
+    commit_callback(params.log_term, params.log_idx);
+    last_committed_idx = params.log_idx;
     return nullptr;
 }
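The switch from commit to commit_ext is what threads the log term through to the dispatcher. A bare index is ambiguous across leader changes: after a log truncation the same index can be occupied by a different entry from a later term, so waking waiters keyed on (term, index) requires the committed entry's own term. NuRaft's extended commit API passes a parameter struct instead of just the index; its shape as used here, inferred from the fields the patch reads (params.log_idx, params.log_term, *params.data) and assuming the bundled NuRaft revision:

    /// Sketch of the extended commit parameters; exact definition lives in
    /// the NuRaft fork referenced by the submodule, not in this repository.
    struct ext_op_params
    {
        uint64_t log_idx;                 /// position of the committed entry
        uint64_t log_term;                /// term in which the entry was appended
        nuraft::ptr<nuraft::buffer> data; /// serialized request payload
    };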
diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h
index cfebc81cbf8..f5a38228c86 100644
--- a/src/Coordination/KeeperStateMachine.h
+++ b/src/Coordination/KeeperStateMachine.h
@@ -19,7 +19,7 @@ using SnapshotsQueue = ConcurrentBoundedQueue<CreateSnapshotTask>;
 class KeeperStateMachine : public nuraft::state_machine
 {
 public:
-    using CommitCallback = std::function<void(KeeperStorage::RequestForSession &, uint64_t)>;
+    using CommitCallback = std::function<void(uint64_t, uint64_t)>;
 
     KeeperStateMachine(
         ResponsesQueue & responses_queue_,
@@ -28,7 +28,7 @@ public:
         const CoordinationSettingsPtr & coordination_settings_,
         const std::string & superdigest_ = "",
         bool digest_enabled_ = true,
-        CommitCallback commit_callback_ = [](KeeperStorage::RequestForSession &, uint64_t){});
+        CommitCallback commit_callback_ = [](uint64_t, uint64_t){});
 
     /// Read state from the latest snapshot
     void init();
@@ -39,7 +39,7 @@ public:
 
     nuraft::ptr<nuraft::buffer> pre_commit(uint64_t log_idx, nuraft::buffer & data) override;
 
-    nuraft::ptr<nuraft::buffer> commit(const uint64_t log_idx, nuraft::buffer & data) override; /// NOLINT
+    nuraft::ptr<nuraft::buffer> commit_ext(const ext_op_params& params) override; /// NOLINT
 
     /// Save new cluster config to our snapshot (copy of the config stored in StateManager)
     void commit_config(const uint64_t log_idx, nuraft::ptr<nuraft::cluster_config> & new_conf) override; /// NOLINT
diff --git a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
index 2ab747fbd71..41bdcdbd2b4 100644
--- a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
+++ b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
@@ -1,5 +1,5 @@
 <clickhouse>
-    <listen_host>::</listen_host>
+    <listen_host>0.0.0.0</listen_host>
 
     <logger>
         <level>trace</level>
@@ -19,6 +19,7 @@
             <operation_timeout_ms>120000</operation_timeout_ms>
             <raft_logs_level>trace</raft_logs_level>
             <heart_beat_interval_ms>1000</heart_beat_interval_ms>
+            <leadership_expiry>1900</leadership_expiry>
             <election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
             <election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
             <quorum_reads>{quorum_reads}</quorum_reads>
diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/register.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/register.clj
index b2f381168bd..fb2375e6c54 100644
--- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/register.clj
+++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/register.clj
@@ -20,7 +20,8 @@
     (assoc this :conn (zk-connect node 9181 30000)))
 
   (setup! [this test]
-    (zk-create-range conn 300)) ; 300 nodes to be sure
+    (exec-with-retries 30 (fn []
+                            (zk-create-range conn 300))))
 
   (invoke! [_ test op]
     (let [[k v] (:value op)
From d56894e04e69f8f0a36297491e6f8c8a6588ae57 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Sat, 18 Jun 2022 19:16:17 +0000
Subject: [PATCH 05/74] Don't rollback session_id

---
 src/Coordination/KeeperStateMachine.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp
index 8080f373497..c2bd7772b1b 100644
--- a/src/Coordination/KeeperStateMachine.cpp
+++ b/src/Coordination/KeeperStateMachine.cpp
@@ -286,6 +286,10 @@ void KeeperStateMachine::commit_config(const uint64_t /* log_idx */, nuraft::ptr
 void KeeperStateMachine::rollback(uint64_t log_idx, nuraft::buffer & data)
 {
     auto request_for_session = parseRequest(data);
+
+    if (request_for_session.request->getOpNum() == Coordination::OpNum::SessionID)
+        return;
+
     // If we received a log from an older node, use the log_idx as the zxid
     // log_idx will always be larger or equal to the zxid so we can safely do this
     // (log_idx is increased for all logs, while zxid is only increased for requests)

From e484dff9257b46494abf5a793151fd110f5b0641 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Sat, 18 Jun 2022 19:19:26 +0000
Subject: [PATCH 06/74] Disable quorum reads

---
 tests/jepsen.clickhouse-keeper/resources/keeper_config.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
index 41bdcdbd2b4..db857693a7b 100644
--- a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
+++ b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml
@@ -22,7 +22,7 @@
             <leadership_expiry>1900</leadership_expiry>
             <election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
             <election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
-            <quorum_reads>{quorum_reads}</quorum_reads>
+            <quorum_reads>0</quorum_reads>
             <snapshot_distance>{snapshot_distance}</snapshot_distance>
             <stale_log_gap>{stale_log_gap}</stale_log_gap>
             <reserved_log_items>{reserved_log_items}</reserved_log_items>

From fdef4cc44dde8af61c4ade92fceb79efd62ff9cf Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Sat, 18 Jun 2022 19:25:43 +0000
Subject: [PATCH 07/74] Update NuRaft

---
 contrib/NuRaft | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/NuRaft b/contrib/NuRaft
index 6cf55ad8c34..31d44b03092 160000
--- a/contrib/NuRaft
+++ b/contrib/NuRaft
@@ -1 +1 @@
-Subproject commit 6cf55ad8c3426cb8b65e4225baf8a182dd48fc7d
+Subproject commit 31d44b030928258d14e7d6bdfb8682d11f2bd7bf

From ecd545fe4f9b1742f59dac1512f85f6029e79927 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Fri, 24 Jun 2022 11:36:12 +0000
Subject: [PATCH 08/74] Batch read requests

---
 src/Coordination/KeeperDispatcher.cpp | 164 ++++++++++++--------------
 1 file changed, 75 insertions(+), 89 deletions(-)
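Patch 08 reworks the request loop: instead of treating a single read as a separator that flushes the current write batch, the dispatcher now builds homogeneous batches of either quorum requests (writes, or everything when quorum_reads is on) or local reads. When a request of the other kind arrives it is stashed in previous_request, the mode flag flips, and the stashed request seeds the next batch. The discipline, reduced to toy types (Req and split_into_batches are illustrative, not repository code):

    #include <deque>
    #include <optional>
    #include <vector>

    struct Req { bool needs_quorum = false; };

    /// Split a request stream into maximal runs of same-kind requests.
    std::vector<std::vector<Req>> split_into_batches(std::deque<Req> queue)
    {
        std::vector<std::vector<Req>> batches;
        bool collecting_quorum = false;
        std::optional<Req> held_back;

        while (!queue.empty() || held_back)
        {
            std::vector<Req> batch;
            if (held_back)
            {
                batch.push_back(*held_back); /// request that ended the previous run
                held_back.reset();
            }
            while (!queue.empty())
            {
                Req r = queue.front();
                queue.pop_front();
                if (r.needs_quorum != collecting_quorum)
                {
                    held_back = r;                       /// kind changed: close the batch
                    collecting_quorum = !collecting_quorum;
                    break;
                }
                batch.push_back(r);
            }
            if (!batch.empty())
                batches.push_back(std::move(batch));
        }
        return batches;
    }

Quorum batches still go through putRequestBatch and Raft; read batches go through the same leader check as before, either served locally right away or parked until the replica catches up.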
diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index a7b40e9f128..b07997882e5 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -39,6 +39,13 @@ void KeeperDispatcher::requestThread()
     /// to send errors to the client.
     KeeperStorage::RequestsForSessions prev_batch;
 
+    std::optional<KeeperStorage::RequestForSession> previous_request;
+    bool collecting_quorum_requests = false;
+    const auto needs_quorum = [&](const auto & coordination_settings, const auto & request)
+    {
+        return coordination_settings->quorum_reads || !request.request->isReadRequest();
+    };
+
     while (!shutdown_called)
     {
         KeeperStorage::RequestForSession request;
@@ -55,18 +62,20 @@ void KeeperDispatcher::requestThread()
         /// read request. So reads are some kind of "separator" for writes.
         try
         {
+            KeeperStorage::RequestsForSessions current_batch;
+
+            if (previous_request)
+            {
+                current_batch.push_back(std::move(*previous_request));
+                previous_request.reset();
+            }
+
             if (requests_queue->tryPop(request, max_wait))
             {
                 if (shutdown_called)
                     break;
 
-                KeeperStorage::RequestsForSessions current_batch;
-
-                bool has_read_request = false;
-
-                /// If new request is not read request or we must to process it through quorum.
-                /// Otherwise we will process it locally.
-                if (coordination_settings->quorum_reads || !request.request->isReadRequest())
+                if (collecting_quorum_requests == needs_quorum(coordination_settings, request))
                 {
                     current_batch.emplace_back(request);
 
@@ -79,16 +88,14 @@ void KeeperDispatcher::requestThread()
                     if (requests_queue->tryPop(request, 1))
                     {
                         /// Don't append read request into batch, we have to process them separately
-                        if (!coordination_settings->quorum_reads && request.request->isReadRequest())
+                        if (collecting_quorum_requests != needs_quorum(coordination_settings, request))
                         {
-                            has_read_request = true;
+                            previous_request.emplace(std::move(request));
+                            collecting_quorum_requests = !collecting_quorum_requests;
                             break;
                         }
-                        else
-                        {
-                            current_batch.emplace_back(request);
-                        }
+
+                        current_batch.emplace_back(request);
                     }
 
                     if (shutdown_called)
@@ -96,7 +103,10 @@ void KeeperDispatcher::requestThread()
                 }
             }
             else
-                has_read_request = true;
+            {
+                previous_request.emplace(std::move(request));
+                collecting_quorum_requests = !collecting_quorum_requests;
+            }
 
             if (shutdown_called)
                 break;
@@ -105,34 +115,59 @@ void KeeperDispatcher::requestThread()
             /// Forcefully process all previous pending requests
             if (prev_result)
                 forceWaitAndProcessResult(prev_result, prev_batch);
 
-            /// Process collected write requests batch
+            /// Process collected requests batch
             if (!current_batch.empty())
             {
-                auto result = server->putRequestBatch(current_batch);
-
-                if (result)
+                if (collecting_quorum_requests)
                 {
-                    if (has_read_request) /// If we will execute read request next, than we have to process result now
-                        forceWaitAndProcessResult(result, current_batch);
+                    auto result = server->putRequestBatch(current_batch);
+                    prev_batch = std::move(current_batch);
+                    prev_result = result;
                 }
                 else
                 {
-                    addErrorResponses(current_batch, Coordination::Error::ZCONNECTIONLOSS);
-                    current_batch.clear();
+                    auto leader_info_result = server->getLeaderInfo();
+
+                    auto & leader_info_ctx = leader_info_result->get();
+
+                    /// If we get some errors, then send them to clients
+                    if (!leader_info_result->get_accepted() || leader_info_result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
+                    {
+                        addErrorResponses(current_batch, Coordination::Error::ZOPERATIONTIMEOUT);
+                    }
+                    else if (leader_info_result->get_result_code() != nuraft::cmd_result_code::OK)
+                    {
+                        addErrorResponses(current_batch, Coordination::Error::ZCONNECTIONLOSS);
+                    }
+                    else
+                    {
+                        KeeperServer::NodeInfo leader_info;
+                        leader_info.term = leader_info_ctx->get_ulong();
+                        leader_info.last_committed_index = leader_info_ctx->get_ulong();
+
+                        std::lock_guard lock(leader_waiter_mutex);
+                        auto node_info = server->getNodeInfo();
+
+                        if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
+                        {
+                            auto & leader_waiter = leader_waiters[node_info];
+                            leader_waiter.insert(leader_waiter.end(), current_batch.begin(), current_batch.end());
+                            LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index);
+                        }
+                        else
+                        {
+                            for (const auto & read_request : current_batch)
+                            {
+                                if (server->isLeaderAlive())
+                                    server->putLocalReadRequest(read_request);
+                                else
+                                    addErrorResponses({read_request}, Coordination::Error::ZCONNECTIONLOSS);
+                            }
+                        }
+                    }
                 }
-
-                prev_batch = std::move(current_batch);
-                prev_result = result;
-            }
-
-            /// Read request always goes after write batch (last request)
-            if (has_read_request)
-            {
-                if (server->isLeaderAlive())
-                    server->putLocalReadRequest(request);
-                else
-                    addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS);
+                current_batch.clear();
             }
         }
         catch (...)
@@ -279,39 +313,6 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
     request_info.time = duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
     request_info.session_id = session_id;
 
-    if (request->isReadRequest())
-    {
-        auto leader_info_result = server->getLeaderInfo();
-
-        auto & leader_info_ctx = leader_info_result->get();
-
-        /// If we get some errors, then send them to clients
-        if (!leader_info_result->get_accepted() || leader_info_result->get_result_code() == nuraft::cmd_result_code::TIMEOUT)
-        {
-            addErrorResponses({ request_info }, Coordination::Error::ZOPERATIONTIMEOUT);
-            return false;
-        }
-        else if (leader_info_result->get_result_code() != nuraft::cmd_result_code::OK)
-        {
-            addErrorResponses({ request_info }, Coordination::Error::ZCONNECTIONLOSS);
-            return false;
-        }
-
-        KeeperServer::NodeInfo leader_info;
-        leader_info.term = leader_info_ctx->get_ulong();
-        leader_info.last_committed_index = leader_info_ctx->get_ulong();
-
-        auto node_info = server->getNodeInfo();
-
-        if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
-        {
-            std::lock_guard lock(leader_waiter_mutex);
-            leader_waiters[node_info].push_back(std::move(request_info));
-            LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index);
-            return true;
-        }
-    }
-
     std::lock_guard lock(push_request_mutex);
 
     if (shutdown_called)
@@ -629,34 +631,18 @@ void KeeperDispatcher::updateConfigurationThread()
     }
 }
 
 void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx)
 {
     const auto process_requests = [this](auto & request_queue)
     {
         for (auto & request_info : request_queue)
         {
-            std::lock_guard lock(push_request_mutex);
-
-            if (shutdown_called)
-                return;
-
-            /// Put close requests without timeouts
-            if (request_info.request->getOpNum() == Coordination::OpNum::Close)
-            {
-                if (!requests_queue->push(std::move(request_info)))
-                    throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR);
-            }
-            else if (!requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds()))
-            {
-                throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
-            }
+            server->putLocalReadRequest(request_info);
         }
 
         request_queue.clear();
     };
 
+    std::lock_guard lock(leader_waiter_mutex);
+    auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx});
+    if (request_queue_it != leader_waiters.end())
     {
-        std::lock_guard lock(leader_waiter_mutex);
-        auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx});
-        if (request_queue_it != leader_waiters.end())
-        {
-            process_requests(request_queue_it->second);
-            leader_waiters.erase(request_queue_it);
-        }
+        process_requests(request_queue_it->second);
+        leader_waiters.erase(request_queue_it);
     }
 }

From 197ae25aa3ec53ff2affc4e61a42cff438e3f9e3 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Fri, 24 Jun 2022 11:38:09 +0000
Subject: [PATCH 09/74] Allow only jepsen test

---
 .github/workflows/pull_request.yml | 3348 
+--------------------------- 1 file changed, 1 insertion(+), 3347 deletions(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 79d54d77f06..f021554dd7b 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -109,196 +109,11 @@ jobs: with: name: changed_images path: ${{ runner.temp }}/changed_images.json - StyleCheck: - needs: DockerHubPush - runs-on: [self-hosted, style-checker] - if: ${{ success() || failure() }} - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{ runner.temp }}/style_check - EOF - - name: Download changed images - # even if artifact does not exist, e.g. on `do not test` label or failed Docker job - continue-on-error: true - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Style Check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 style_check.py - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FastTest: - needs: DockerHubPush - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/fasttest - REPO_COPY=${{runner.temp}}/fasttest/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - EOF - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" - mkdir "$GITHUB_WORKSPACE" - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.TEMP_PATH }} - - name: Fast Test - run: | - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - CompatibilityCheck: - needs: [BuilderDebRelease] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/compatibility_check - REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse - REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: CompatibilityCheck - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - SplitBuildSmokeTest: - needs: [BuilderDebSplitted] - runs-on: [self-hosted, style-checker] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/split_build_check - REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse - 
REPORTS_PATH=${{runner.temp}}/reports_dir - EOF - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Split build check - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### - BuilderDebRelease: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_release - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - with: - fetch-depth: 0 # for performance artifact - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" BuilderBinRelease: - needs: [DockerHubPush, FastTest] + needs: [DockerHubPush] runs-on: [self-hosted, builder] steps: - name: Set envs @@ -342,3170 +157,9 @@ jobs: # shellcheck disable=SC2046 docker rm -f $(docker ps -a -q) ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - # BuilderBinGCC: - # needs: [DockerHubPush, FastTest] - # runs-on: [self-hosted, builder] - # steps: - # - name: Set envs - # run: | - # cat >> "$GITHUB_ENV" << 'EOF' - # TEMP_PATH=${{runner.temp}}/build_check - # IMAGES_PATH=${{runner.temp}}/images_path - # REPO_COPY=${{runner.temp}}/build_check/ClickHouse - # CACHES_PATH=${{runner.temp}}/../ccaches - # BUILD_NAME=binary_gcc - # EOF - # - name: Download changed images - # uses: actions/download-artifact@v2 - # with: - # name: changed_images - # path: ${{ runner.temp }}/images_path - # - name: Clear repository - # run: | - # sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - # - name: Check out repository code - # uses: actions/checkout@v2 - # - name: Build - # run: | - # git -C "$GITHUB_WORKSPACE" submodule sync --recursive - # git -C "$GITHUB_WORKSPACE" submodule update 
--depth=1 --recursive --init --jobs=10 - # sudo rm -fr "$TEMP_PATH" - # mkdir -p "$TEMP_PATH" - # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - # cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - # - name: Upload build URLs to artifacts - # if: ${{ success() || failure() }} - # uses: actions/upload-artifact@v2 - # with: - # name: ${{ env.BUILD_URLS }} - # path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - # - name: Cleanup - # if: always() - # run: | - # # shellcheck disable=SC2046 - # docker kill $(docker ps -q) ||: - # # shellcheck disable=SC2046 - # docker rm -f $(docker ps -a -q) ||: - # sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderDebAarch64: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_aarch64 - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ runner.temp }}/images_path - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - with: - fetch-depth: 0 # for performance artifact - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderDebAsan: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_asan - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - 
BuilderDebUBsan: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_ubsan - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderDebTsan: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_tsan - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderDebMsan: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_msan - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync 
--recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderDebDebug: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=package_debug - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" -########################################################################################## -##################################### SPECIAL BUILDS ##################################### -########################################################################################## - BuilderDebSplitted: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=binary_splitted - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ 
env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderBinTidy: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=binary_tidy - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderBinDarwin: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=binary_darwin - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ env.IMAGES_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Build - run: | - git -C "$GITHUB_WORKSPACE" submodule sync --recursive - git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" - - name: Upload build URLs to artifacts - if: ${{ success() || failure() }} - uses: actions/upload-artifact@v2 - with: - name: ${{ env.BUILD_URLS }} - path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" - BuilderBinAarch64: - needs: [DockerHubPush, FastTest] - runs-on: [self-hosted, builder] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/build_check - IMAGES_PATH=${{runner.temp}}/images_path - REPO_COPY=${{runner.temp}}/build_check/ClickHouse - CACHES_PATH=${{runner.temp}}/../ccaches - BUILD_NAME=binary_aarch64 - EOF - - name: Download changed images - uses: actions/download-artifact@v2 - with: - name: changed_images - path: ${{ 
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Build
-        run: |
-          git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-          git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload build URLs to artifacts
-        if: ${{ success() || failure() }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ${{ env.BUILD_URLS }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-  BuilderBinFreeBSD:
-    needs: [DockerHubPush, FastTest]
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/build_check
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=binary_freebsd
-          EOF
-      - name: Download changed images
-        uses: actions/download-artifact@v2
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Build
-        run: |
-          git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-          git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload build URLs to artifacts
-        if: ${{ success() || failure() }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ${{ env.BUILD_URLS }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-  BuilderBinDarwinAarch64:
-    needs: [DockerHubPush, FastTest]
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/build_check
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=binary_darwin_aarch64
-          EOF
-      - name: Download changed images
-        uses: actions/download-artifact@v2
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Build
-        run: |
-          git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-          git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload build URLs to artifacts
-        if: ${{ success() || failure() }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ${{ env.BUILD_URLS }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-  BuilderBinPPC64:
-    needs: [DockerHubPush, FastTest]
-    runs-on: [self-hosted, builder]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/build_check
-          IMAGES_PATH=${{runner.temp}}/images_path
-          REPO_COPY=${{runner.temp}}/build_check/ClickHouse
-          CACHES_PATH=${{runner.temp}}/../ccaches
-          BUILD_NAME=binary_ppc64le
-          EOF
-      - name: Download changed images
-        uses: actions/download-artifact@v2
-        with:
-          name: changed_images
-          path: ${{ env.IMAGES_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Build
-        run: |
-          git -C "$GITHUB_WORKSPACE" submodule sync --recursive
-          git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
-      - name: Upload build URLs to artifacts
-        if: ${{ success() || failure() }}
-        uses: actions/upload-artifact@v2
-        with:
-          name: ${{ env.BUILD_URLS }}
-          path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
-############################################################################################
-##################################### Docker images #######################################
-############################################################################################
-  DockerServerImages:
-    needs:
-      - BuilderDebRelease
-      - BuilderDebAarch64
-    runs-on: [self-hosted, style-checker]
-    steps:
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-        with:
-          fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself
-      - name: Check docker clickhouse/clickhouse-server building
-        run: |
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 docker_server.py --release-type head --no-push
-          python3 docker_server.py --release-type head --no-push --no-ubuntu \
-            --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-############################################################################################
-##################################### BUILD REPORTER #######################################
-############################################################################################
-  BuilderReport:
-    needs:
-      - BuilderBinRelease
-      - BuilderDebAarch64
-      - BuilderDebAsan
-      - BuilderDebDebug
-      - BuilderDebMsan
-      - BuilderDebRelease
-      - BuilderDebTsan
-      - BuilderDebUBsan
-    runs-on: [self-hosted, style-checker]
-    if: ${{ success() || failure() }}
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          CHECK_NAME=ClickHouse build check (actions)
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          TEMP_PATH=${{runner.temp}}/report_check
-          NEEDS_DATA_PATH=${{runner.temp}}/needs.json
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Report Builder
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cat > "$NEEDS_DATA_PATH" << 'EOF'
-          ${{ toJSON(needs) }}
-          EOF
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  BuilderSpecialReport:
-    needs:
-      - BuilderBinAarch64
-      - BuilderBinDarwin
-      - BuilderBinDarwinAarch64
-      - BuilderBinFreeBSD
-      # - BuilderBinGCC
-      - BuilderBinPPC64
-      - BuilderBinTidy
-      - BuilderDebSplitted
-    runs-on: [self-hosted, style-checker]
-    if: ${{ success() || failure() }}
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/report_check
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=ClickHouse special build check (actions)
-          NEEDS_DATA_PATH=${{runner.temp}}/needs.json
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Report Builder
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cat > "$NEEDS_DATA_PATH" << 'EOF'
-          ${{ toJSON(needs) }}
-          EOF
-          cd "$GITHUB_WORKSPACE/tests/ci"
-          python3 build_report_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-########################### FUNCTIONAl STATELESS TESTS #######################################
-##############################################################################################
-  FunctionalStatelessTestRelease:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_release
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, actions)
-          REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseDatabaseReplicated0:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, DatabaseReplicated, actions)
-          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseDatabaseReplicated1:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_database_replicated
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, DatabaseReplicated, actions)
-          REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseWideParts:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_wide_parts
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, wide parts enabled, actions)
-          REPO_COPY=${{runner.temp}}/stateless_wide_parts/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestReleaseS3:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_s3_storage
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (release, s3 storage, actions)
-          REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestAarch64:
-    needs: [BuilderDebAarch64]
-    runs-on: [self-hosted, func-tester-aarch64]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_release
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (aarch64, actions)
-          REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestAsan0:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_debug
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (address, actions)
-          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestAsan1:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_debug
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (address, actions)
-          REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=2
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestTsan0:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestTsan1:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestTsan2:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestUBsan:
-    needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_ubsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (ubsan, actions)
-          REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse
-          KILL_TIMEOUT=10800
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestMsan0:
-    needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_memory
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (memory, actions)
-          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  FunctionalStatelessTestMsan1:
-    needs: [BuilderDebMsan]
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateless_memory
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateless tests (memory, actions)
-          REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse
-          KILL_TIMEOUT=10800
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
"$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestMsan2: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_memory - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (memory, actions) - REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug0: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, actions) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug1: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, actions) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo 
rm -fr "$TEMP_PATH" - FunctionalStatelessTestDebug2: - needs: [BuilderDebDebug] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests (debug, actions) - REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse - KILL_TIMEOUT=10800 - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatelessTestFlakyCheck: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateless_flaky_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateless tests flaky check (address, actions) - REPO_COPY=${{runner.temp}}/stateless_flaky_asan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - TestsBugfixCheck: - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/tests_bugfix_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Tests bugfix validate check (actions) - KILL_TIMEOUT=3600 - REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Bugfix test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - - TEMP_PATH="${TEMP_PATH}/integration" \ - REPORTS_PATH="${REPORTS_PATH}/integration" \ - python3 integration_test_check.py "Integration tests bugfix validate check" \ - --validate-bugfix --post-commit-status=file || echo 'ignore exit code' - - TEMP_PATH="${TEMP_PATH}/stateless" \ - REPORTS_PATH="${REPORTS_PATH}/stateless" \ - python3 functional_test_check.py "Stateless tests bugfix validate check" "$KILL_TIMEOUT" \ - --validate-bugfix --post-commit-status=file || echo 'ignore exit code' - - python3 bugfix_validate_check.py 
"${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################## -############################ FUNCTIONAl STATEFUL TESTS ####################################### -############################################################################################## - FunctionalStatefulTestRelease: - needs: [BuilderDebRelease] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (release, actions) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestAarch64: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (aarch64, actions) - REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestAsan: - needs: [BuilderDebAsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (address, actions) - REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd 
"$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestTsan: - needs: [BuilderDebTsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (thread, actions) - REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestMsan: - needs: [BuilderDebMsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (memory, actions) - REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestUBsan: - needs: [BuilderDebUBsan] - runs-on: [self-hosted, func-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/stateful_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Stateful tests (ubsan, actions) - REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse - KILL_TIMEOUT=3600 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Functional test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - FunctionalStatefulTestDebug: - needs: [BuilderDebDebug] - 
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stateful_debug
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stateful tests (debug, actions)
-          REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse
-          KILL_TIMEOUT=3600
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Functional test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-######################################### STRESS TESTS #######################################
-##############################################################################################
-  StressTestAsan:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stress_thread
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stress test (address, actions)
-          REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Stress test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 stress_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  StressTestTsan:
-    needs: [BuilderDebTsan]
-    # func testers have 16 cores + 128 GB memory
-    # while stress testers have 36 cores + 72 memory
-    # It would be better to have something like 32 + 128,
-    # but such servers almost unavailable as spot instances.
-    runs-on: [self-hosted, func-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stress_thread
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stress test (thread, actions)
-          REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Stress test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 stress_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  StressTestMsan:
-    needs: [BuilderDebMsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stress_memory
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stress test (memory, actions)
-          REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Stress test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 stress_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  StressTestUBsan:
-    needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stress_undefined
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stress test (undefined, actions)
-          REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Stress test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 stress_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  StressTestDebug:
-    needs: [BuilderDebDebug]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/stress_debug
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Stress test (debug, actions)
-          REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Stress test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 stress_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-##############################################################################################
-##################################### AST FUZZERS ############################################
-##############################################################################################
-  ASTFuzzerTestAsan:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, fuzzer-unit-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=AST fuzzer (ASan, actions)
-          REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Fuzzer
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 ast_fuzzer_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  ASTFuzzerTestTsan:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, fuzzer-unit-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=AST fuzzer (TSan, actions)
-          REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Fuzzer
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 ast_fuzzer_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  ASTFuzzerTestUBSan:
-    needs: [BuilderDebUBsan]
-    runs-on: [self-hosted, fuzzer-unit-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=AST fuzzer (UBSan, actions)
-          REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Fuzzer
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 ast_fuzzer_check.py "$CHECK_NAME"
"$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - ASTFuzzerTestMSan: - needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (MSan, actions) - REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - ASTFuzzerTestDebug: - needs: [BuilderDebDebug] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/ast_fuzzer_debug - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=AST fuzzer (debug, actions) - REPO_COPY=${{runner.temp}}/ast_fuzzer_debug/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Fuzzer - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 ast_fuzzer_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################# -############################# INTEGRATION TESTS ############################################# -############################################################################################# - IntegrationTestsAsan0: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (asan, actions) - REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=3 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck 
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsAsan1:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_asan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (asan, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsAsan2:
-    needs: [BuilderDebAsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_asan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (asan, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse
-          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=3
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsTsan0:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=4
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsTsan1:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
-          RUN_BY_HASH_NUM=1
-          RUN_BY_HASH_TOTAL=4
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsTsan2:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
-          RUN_BY_HASH_NUM=2
-          RUN_BY_HASH_TOTAL=4
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsTsan3:
-    needs: [BuilderDebTsan]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_tsan
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (thread, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse
-          RUN_BY_HASH_NUM=3
-          RUN_BY_HASH_TOTAL=4
-          EOF
-      - name: Download json reports
-        uses: actions/download-artifact@v2
-        with:
-          path: ${{ env.REPORTS_PATH }}
-      - name: Clear repository
-        run: |
-          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
-      - name: Check out repository code
-        uses: actions/checkout@v2
-      - name: Integration test
-        run: |
-          sudo rm -fr "$TEMP_PATH"
-          mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY/tests/ci"
-          python3 integration_test_check.py "$CHECK_NAME"
-      - name: Cleanup
-        if: always()
-        run: |
-          # shellcheck disable=SC2046
-          docker kill $(docker ps -q) ||:
-          # shellcheck disable=SC2046
-          docker rm -f $(docker ps -a -q) ||:
-          sudo rm -fr "$TEMP_PATH"
-  IntegrationTestsRelease0:
-    needs: [BuilderDebRelease]
-    runs-on: [self-hosted, stress-tester]
-    steps:
-      - name: Set envs
-        run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          TEMP_PATH=${{runner.temp}}/integration_tests_release
-          REPORTS_PATH=${{runner.temp}}/reports_dir
-          CHECK_NAME=Integration tests (release, actions)
-          REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse
-          RUN_BY_HASH_NUM=0
-          RUN_BY_HASH_TOTAL=2
-          EOF
name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsRelease1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_release - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests (release, actions) - REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=2 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - IntegrationTestsFlakyCheck: - needs: [BuilderDebAsan] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/integration_tests_asan_flaky_check - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Integration tests flaky check (asan, actions) - REPO_COPY=${{runner.temp}}/integration_tests_asan_flaky_check/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Integration test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 integration_test_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################# -#################################### UNIT TESTS ############################################# -############################################################################################# - UnitTestsAsan: - needs: [BuilderDebAsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (asan, actions) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - uses: 
actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - UnitTestsReleaseClang: - needs: [BuilderBinRelease] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_asan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (release-clang, actions) - REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - # UnitTestsReleaseGCC: - # needs: [BuilderBinGCC] - # runs-on: [self-hosted, fuzzer-unit-tester] - # steps: - # - name: Set envs - # run: | - # cat >> "$GITHUB_ENV" << 'EOF' - # TEMP_PATH=${{runner.temp}}/unit_tests_asan - # REPORTS_PATH=${{runner.temp}}/reports_dir - # CHECK_NAME=Unit tests (release-gcc, actions) - # REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse - # EOF - # - name: Download json reports - # uses: actions/download-artifact@v2 - # with: - # path: ${{ env.REPORTS_PATH }} - # - name: Clear repository - # run: | - # sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - # - name: Check out repository code - # uses: actions/checkout@v2 - # - name: Unit test - # run: | - # sudo rm -fr "$TEMP_PATH" - # mkdir -p "$TEMP_PATH" - # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - # cd "$REPO_COPY/tests/ci" - # python3 unit_tests_check.py "$CHECK_NAME" - # - name: Cleanup - # if: always() - # run: | - # # shellcheck disable=SC2046 - # docker kill $(docker ps -q) ||: - # # shellcheck disable=SC2046 - # docker rm -f $(docker ps -a -q) ||: - # sudo rm -fr "$TEMP_PATH" - UnitTestsTsan: - needs: [BuilderDebTsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_tsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (tsan, actions) - REPO_COPY=${{runner.temp}}/unit_tests_tsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py 
"$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - UnitTestsMsan: - needs: [BuilderDebMsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_msan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (msan, actions) - REPO_COPY=${{runner.temp}}/unit_tests_msan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - UnitTestsUBsan: - needs: [BuilderDebUBsan] - runs-on: [self-hosted, fuzzer-unit-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/unit_tests_ubsan - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Unit tests (ubsan, actions) - REPO_COPY=${{runner.temp}}/unit_tests_ubsan/ClickHouse - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Unit test - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 unit_tests_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" -############################################################################################# -#################################### PERFORMANCE TESTS ###################################### -############################################################################################# - PerformanceComparison0: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck 
disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparison1: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparison2: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparison3: - needs: [BuilderDebRelease] - runs-on: [self-hosted, stress-tester] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch0: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - 
TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=0 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch1: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=1 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch2: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - RUN_BY_HASH_NUM=2 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" - PerformanceComparisonAarch3: - needs: [BuilderDebAarch64] - runs-on: [self-hosted, func-tester-aarch64] - steps: - - name: Set envs - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/performance_comparison - REPORTS_PATH=${{runner.temp}}/reports_dir - CHECK_NAME=Performance Comparison Aarch64 - REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse - 
RUN_BY_HASH_NUM=3 - RUN_BY_HASH_TOTAL=4 - EOF - - name: Download json reports - uses: actions/download-artifact@v2 - with: - path: ${{ env.REPORTS_PATH }} - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Performance Comparison - run: | - sudo rm -fr "$TEMP_PATH" - mkdir -p "$TEMP_PATH" - cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" - cd "$REPO_COPY/tests/ci" - python3 performance_comparison_check.py "$CHECK_NAME" - - name: Cleanup - if: always() - run: | - # shellcheck disable=SC2046 - docker kill $(docker ps -q) ||: - # shellcheck disable=SC2046 - docker rm -f $(docker ps -a -q) ||: - sudo rm -fr "$TEMP_PATH" ############################################################################################# ###################################### JEPSEN TESTS ######################################### ############################################################################################# Jepsen: needs: [BuilderBinRelease] uses: ./.github/workflows/jepsen.yml - - FinishCheck: - needs: - - StyleCheck - - DockerHubPush - - DockerServerImages - - CheckLabels - - BuilderReport - - FastTest - - FunctionalStatelessTestDebug0 - - FunctionalStatelessTestDebug1 - - FunctionalStatelessTestDebug2 - - FunctionalStatelessTestRelease - - FunctionalStatelessTestReleaseDatabaseReplicated0 - - FunctionalStatelessTestReleaseDatabaseReplicated1 - - FunctionalStatelessTestReleaseWideParts - - FunctionalStatelessTestAarch64 - - FunctionalStatelessTestAsan0 - - FunctionalStatelessTestAsan1 - - FunctionalStatelessTestTsan0 - - FunctionalStatelessTestTsan1 - - FunctionalStatelessTestTsan2 - - FunctionalStatelessTestMsan0 - - FunctionalStatelessTestMsan1 - - FunctionalStatelessTestMsan2 - - FunctionalStatelessTestUBsan - - FunctionalStatefulTestDebug - - FunctionalStatefulTestRelease - - FunctionalStatefulTestAarch64 - - FunctionalStatefulTestAsan - - FunctionalStatefulTestTsan - - FunctionalStatefulTestMsan - - FunctionalStatefulTestUBsan - - FunctionalStatelessTestReleaseS3 - - StressTestDebug - - StressTestAsan - - StressTestTsan - - StressTestMsan - - StressTestUBsan - - ASTFuzzerTestDebug - - ASTFuzzerTestAsan - - ASTFuzzerTestTsan - - ASTFuzzerTestMSan - - ASTFuzzerTestUBSan - - IntegrationTestsAsan0 - - IntegrationTestsAsan1 - - IntegrationTestsAsan2 - - IntegrationTestsRelease0 - - IntegrationTestsRelease1 - - IntegrationTestsTsan0 - - IntegrationTestsTsan1 - - IntegrationTestsTsan2 - - IntegrationTestsTsan3 - - PerformanceComparison0 - - PerformanceComparison1 - - PerformanceComparison2 - - PerformanceComparison3 - - PerformanceComparisonAarch0 - - PerformanceComparisonAarch1 - - PerformanceComparisonAarch2 - - PerformanceComparisonAarch3 - - UnitTestsAsan - - UnitTestsTsan - - UnitTestsMsan - - UnitTestsUBsan - - UnitTestsReleaseClang - - SplitBuildSmokeTest - - CompatibilityCheck - - IntegrationTestsFlakyCheck - - Jepsen - runs-on: [self-hosted, style-checker] - steps: - - name: Clear repository - run: | - sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" - - name: Check out repository code - uses: actions/checkout@v2 - - name: Finish label - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 finish_check.py From 76c4fd9c8a27b0f4ea35a04a11ed278ee137f258 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 24 Jun 2022 14:42:24 +0000 Subject: [PATCH 10/74] Some fixes for batch reads --- src/Coordination/KeeperDispatcher.cpp | 18 +++++++++++++----- 1 file changed, 13 
insertions(+), 5 deletions(-)

diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index b07997882e5..46e9514e2a7 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -41,7 +41,7 @@ void KeeperDispatcher::requestThread()
     std::optional<KeeperStorage::RequestForSession> previous_request;
     bool collecting_quorum_requests = false;
 
-    const auto needs_quorum = [&](const auto & coordination_settings, const auto & request)
+    const auto needs_quorum = [](const auto & coordination_settings, const auto & request)
     {
         return coordination_settings->quorum_reads || !request.request->isReadRequest();
     };
@@ -64,8 +64,11 @@ void KeeperDispatcher::requestThread()
         {
             KeeperStorage::RequestsForSessions current_batch;
 
+            auto next_collecting_quorum_requests = collecting_quorum_requests;
+
             if (previous_request)
             {
+                assert(collecting_quorum_requests == needs_quorum(coordination_settings, *previous_request));
                 current_batch.push_back(std::move(*previous_request));
                 previous_request.reset();
             }
@@ -91,7 +94,7 @@ void KeeperDispatcher::requestThread()
                         if (collecting_quorum_requests != needs_quorum(coordination_settings, request))
                         {
                             previous_request.emplace(std::move(request));
-                            collecting_quorum_requests = !collecting_quorum_requests;
+                            next_collecting_quorum_requests = !collecting_quorum_requests;
                             break;
                         }
@@ -105,7 +108,7 @@ void KeeperDispatcher::requestThread()
                 else
                 {
                     previous_request.emplace(std::move(request));
-                    collecting_quorum_requests = !collecting_quorum_requests;
+                    next_collecting_quorum_requests = !collecting_quorum_requests;
                 }
 
                 if (shutdown_called)
@@ -169,6 +172,8 @@ void KeeperDispatcher::requestThread()
                 }
                 current_batch.clear();
             }
+
+            collecting_quorum_requests = next_collecting_quorum_requests;
         }
         catch (...)
         {
@@ -629,9 +634,12 @@ void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx)
 {
     const auto process_requests = [this](auto & request_queue)
     {
-        for (auto & request_info : request_queue)
+        for (const auto & request_info : request_queue)
         {
-            server->putLocalReadRequest(request_info);
+            if (server->isLeaderAlive())
+                server->putLocalReadRequest(request_info);
+            else
+                addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS);
         }
 
         request_queue.clear();

From 8a3531c2dbe53832a0d998a9439b1cf9101eef9c Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Mon, 27 Jun 2022 07:30:59 +0000
Subject: [PATCH 11/74] Refactor batching requests logic

---
 src/Coordination/KeeperDispatcher.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index 46e9514e2a7..ce8d1a0d576 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -85,7 +85,7 @@ void KeeperDispatcher::requestThread()
                     /// Waiting until previous append will be successful, or batch is big enough
                     /// has_result == false && get_result_code == OK means that our request still not processed.
                     /// Sometimes NuRaft set errorcode without setting result, so we check both here.
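
The state those three comment lines describe is worth spelling out. Below is a minimal standalone sketch of the "previous append is still in flight" predicate the loop is built around, assuming the RaftAppendResult alias from KeeperServer.h and the NuRaft calls already used throughout these patches; it is an illustration, not code from the patch:

    #include <libnuraft/nuraft.hxx>

    using RaftAppendResult = nuraft::ptr<nuraft::cmd_result<nuraft::ptr<nuraft::buffer>>>;

    /// The batch may keep growing only while the previous append is still in
    /// flight: no result delivered yet and no error code set. Any other state
    /// means the append finished or failed, so the batch must be flushed.
    static bool append_still_in_flight(const RaftAppendResult & prev_result)
    {
        return prev_result
            && !prev_result->has_result()
            && prev_result->get_result_code() == nuraft::cmd_result_code::OK;
    }
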
-                    while (prev_result && (!prev_result->has_result() && prev_result->get_result_code() == nuraft::cmd_result_code::OK) && current_batch.size() <= max_batch_size)
+                    while (current_batch.size() <= max_batch_size)
                     {
                         /// Trying to get batch requests as fast as possible
                         if (requests_queue->tryPop(request, 1))
                         {
@@ -100,6 +100,8 @@ void KeeperDispatcher::requestThread()
 
                             current_batch.emplace_back(request);
                         }
+                        else if (!prev_result || (prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK))
+                            break;
 
                         if (shutdown_called)
                             break;
@@ -169,8 +171,9 @@ void KeeperDispatcher::requestThread()
                         }
                     }
                 }
+
+                current_batch.clear();
             }
-            current_batch.clear();
         }
 
         collecting_quorum_requests = next_collecting_quorum_requests;

From f2e15197c66454ae10fe8d3ae3e18062f09bc8b2 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Mon, 27 Jun 2022 07:31:12 +0000
Subject: [PATCH 12/74] Retry only on final read

---
 .../src/jepsen/clickhouse_keeper/counter.clj  | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj
index f82d3f4c348..54f53f9e74f 100644
--- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj
+++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj
@@ -24,7 +24,12 @@
   (invoke! [this test op]
     (case (:f op)
-      :read (exec-with-retries 30 (fn []
+      :read (try
+              (assoc op
+                     :type :ok
+                     :value (count (zk-list conn "/")))
+              (catch Exception _ (assoc op :type :info, :error :connect-error)))
+      :final-read (exec-with-retries 30 (fn []
                     (assoc op
                            :type :ok
                            :value (count (zk-list conn "/")))))
@@ -46,7 +51,5 @@
    :checker (checker/compose
              {:counter (checker/counter)
               :perf (checker/perf)})
-   :generator (->> (range)
-                   (map (fn [x]
-                          (->> (gen/mix [r add])))))
-   :final-generator (gen/once {:type :invoke, :f :read, :value nil})})
+   :generator (gen/mix [r add])
+   :final-generator (gen/once {:type :invoke, :f :final-read, :value nil})})

From 53e906fdd5191e68d7059eafe12e6ee75a10f1f6 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Mon, 27 Jun 2022 12:05:27 +0000
Subject: [PATCH 13/74] Improve batching by processing single request from
 each session

---
 src/Coordination/KeeperDispatcher.cpp   | 224 ++++++++++++++++--------
 src/Coordination/KeeperDispatcher.h     |  14 +-
 src/Coordination/KeeperStateMachine.cpp |   2 +-
 src/Coordination/KeeperStateMachine.h   |   4 +-
 4 files changed, 169 insertions(+), 75 deletions(-)

diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index ce8d1a0d576..7304a400d6a 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -35,17 +35,16 @@ void KeeperDispatcher::requestThread()
 
     /// Result of requests batch from previous iteration
     RaftAppendResult prev_result = nullptr;
-    /// Requests from previous iteration. We store them to be able
-    /// to send errors to the client.
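
This patch replaces the "read requests are separators between write batches" bookkeeping with two dedicated queues. A rough standalone sketch of the classification step, using hypothetical stand-in types (Request, Requests, and route are illustrative names, not from the patch):

    #include <utility>
    #include <vector>

    /// Stand-ins for KeeperStorage::RequestForSession and RequestsForSessions,
    /// reduced to the one bit the routing decision needs.
    struct Request { bool is_read = false; };
    using Requests = std::vector<Request>;

    /// Mirrors the needs_quorum lambda: with quorum_reads enabled every request
    /// goes through Raft as a write; otherwise reads are kept for local processing.
    void route(Request request, Requests & write_requests, Requests & read_requests, bool quorum_reads)
    {
        const bool needs_quorum = quorum_reads || !request.is_read;
        (needs_quorum ? write_requests : read_requests).push_back(std::move(request));
    }
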
KeeperStorage::RequestsForSessions prev_batch; - std::optional previous_request; - bool collecting_quorum_requests = false; const auto needs_quorum = [](const auto & coordination_settings, const auto & request) { return coordination_settings->quorum_reads || !request.request->isReadRequest(); }; + KeeperStorage::RequestsForSessions write_requests; + KeeperStorage::RequestsForSessions read_requests; + while (!shutdown_called) { KeeperStorage::RequestForSession request; @@ -62,55 +61,66 @@ void KeeperDispatcher::requestThread() /// read request. So reads are some kind of "separator" for writes. try { - KeeperStorage::RequestsForSessions current_batch; - - auto next_collecting_quorum_requests = collecting_quorum_requests; - - if (previous_request) - { - assert(collecting_quorum_requests == needs_quorum(coordination_settings, *previous_request)); - current_batch.push_back(std::move(*previous_request)); - previous_request.reset(); - } + KeeperStorage::RequestsForSessions * current_batch{nullptr}; + bool is_current_read = false; if (requests_queue->tryPop(request, max_wait)) { if (shutdown_called) break; - if (collecting_quorum_requests == needs_quorum(coordination_settings, request)) + if (needs_quorum(coordination_settings, request)) { - current_batch.emplace_back(request); + write_requests.emplace_back(request); - /// Waiting until previous append will be successful, or batch is big enough - /// has_result == false && get_result_code == OK means that our request still not processed. - /// Sometimes NuRaft set errorcode without setting result, so we check both here. - while (current_batch.size() <= max_batch_size) + if (!current_batch) { - /// Trying to get batch requests as fast as possible - if (requests_queue->tryPop(request, 1)) - { - /// Don't append read request into batch, we have to process them separately - if (collecting_quorum_requests != needs_quorum(coordination_settings, request)) - { - previous_request.emplace(std::move(request)); - next_collecting_quorum_requests = !collecting_quorum_requests; - break; - } - - current_batch.emplace_back(request); - } - else if (!prev_result || (prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK)) - break; - - if (shutdown_called) - break; + current_batch = &write_requests; + is_current_read = false; } } else { - previous_request.emplace(std::move(request)); - next_collecting_quorum_requests = !collecting_quorum_requests; + read_requests.emplace_back(request); + if (!current_batch) + { + current_batch = &read_requests; + is_current_read = true; + } + } + + /// Waiting until previous append will be successful, or batch is big enough + /// has_result == false && get_result_code == OK means that our request still not processed. + /// Sometimes NuRaft set errorcode without setting result, so we check both here. 
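
The selection loop that follows decides which of the two queues to flush on each iteration. Its rule can be summarized as a standalone sketch (BatchKind and pick_batch are illustrative names, not from the patch):

    #include <cstddef>

    enum class BatchKind { Write, Read, None };

    /// Whichever queue outgrows max_batch_size is flushed first: writes go
    /// through putRequestBatch(), reads through the leader-info round trip.
    /// If neither queue is full yet, the loop keeps draining requests_queue.
    BatchKind pick_batch(std::size_t write_size, std::size_t read_size, std::size_t max_batch_size)
    {
        if (write_size > max_batch_size)
            return BatchKind::Write;
        if (read_size > max_batch_size)
            return BatchKind::Read;
        return BatchKind::None;
    }
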
+ while (true) + { + if (write_requests.size() > max_batch_size) + { + current_batch = &write_requests; + is_current_read = false; + break; + } + + if (read_requests.size() > max_batch_size) + { + current_batch = &read_requests; + is_current_read = true; + break; + } + + /// Trying to get batch requests as fast as possible + if (requests_queue->tryPop(request, 1)) + { + if (needs_quorum(coordination_settings, request)) + write_requests.emplace_back(request); + else + read_requests.emplace_back(request); + } + else if (!prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) + break; + + if (shutdown_called) + break; } if (shutdown_called) @@ -120,32 +130,29 @@ void KeeperDispatcher::requestThread() if (prev_result) forceWaitAndProcessResult(prev_result, prev_batch); - /// Process collected requests batch - if (!current_batch.empty()) + if (current_batch && !current_batch->empty()) { - if (collecting_quorum_requests) + LOG_INFO(&Poco::Logger::get("BATCHING"), "Processing {} batch of {}", is_current_read, current_batch->size()); + if (!is_current_read) { - auto result = server->putRequestBatch(current_batch); - prev_batch = std::move(current_batch); - prev_result = result; + prev_result = server->putRequestBatch(*current_batch); + prev_batch = std::move(*current_batch); + + if (!read_requests.empty()) + { + current_batch = &read_requests; + is_current_read = true; + } + else + current_batch = nullptr; } else { - auto leader_info_result = server->getLeaderInfo(); + prev_result = server->getLeaderInfo(); + prev_result->when_ready([&, requests_for_sessions = *current_batch](nuraft::cmd_result> & result, nuraft::ptr &) + { + auto & leader_info_ctx = result.get(); - auto & leader_info_ctx = leader_info_result->get(); - - /// If we get some errors, then send them to clients - if (!leader_info_result->get_accepted() || leader_info_result->get_result_code() == nuraft::cmd_result_code::TIMEOUT) - { - addErrorResponses(current_batch, Coordination::Error::ZOPERATIONTIMEOUT); - } - else if (leader_info_result->get_result_code() != nuraft::cmd_result_code::OK) - { - addErrorResponses(current_batch, Coordination::Error::ZCONNECTIONLOSS); - } - else - { KeeperServer::NodeInfo leader_info; leader_info.term = leader_info_ctx->get_ulong(); leader_info.last_committed_index = leader_info_ctx->get_ulong(); @@ -156,27 +163,34 @@ void KeeperDispatcher::requestThread() if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) { auto & leader_waiter = leader_waiters[node_info]; - leader_waiter.insert(leader_waiter.end(), current_batch.begin(), current_batch.end()); + leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); } else { - for (const auto & read_request : current_batch) + for (const auto & read_request : requests_for_sessions) { if (server->isLeaderAlive()) server->putLocalReadRequest(read_request); else addErrorResponses({read_request}, Coordination::Error::ZCONNECTIONLOSS); + finalizeRequest(read_request); } } - } - } + }); - current_batch.clear(); + prev_batch = std::move(*current_batch); + + if (!write_requests.empty()) + { + current_batch = &write_requests; + is_current_read = false; + } + else + current_batch = nullptr; + } } } - - collecting_quorum_requests = next_collecting_quorum_requests; } catch (...) 
{ @@ -287,6 +301,29 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ request_info.time = duration_cast(system_clock::now().time_since_epoch()).count(); request_info.session_id = session_id; + { + std::lock_guard lock{unprocessed_request_mutex}; + auto unprocessed_requests_it = unprocessed_requests_for_session.find(session_id); + if (unprocessed_requests_it == unprocessed_requests_for_session.end()) + { + auto & unprocessed_requests = unprocessed_requests_for_session[session_id]; + unprocessed_requests.unprocessed_num = 1; + unprocessed_requests.is_read = request->isReadRequest(); + } + else + { + auto & unprocessed_requests = unprocessed_requests_it->second; + + if (!unprocessed_requests.request_queue.empty() || unprocessed_requests.is_read != request->isReadRequest()) + { + unprocessed_requests.request_queue.push_back(std::move(request_info)); + return true; + } + + ++unprocessed_requests.unprocessed_num; + } + } + std::lock_guard lock(push_request_mutex); if (shutdown_called) @@ -316,7 +353,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf responses_thread = ThreadFromGlobalPool([this] { responseThread(); }); snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); }); - server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](uint64_t log_term, uint64_t log_idx) { onRequestCommit(log_term, log_idx); }); + server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { onRequestCommit(request_for_session, log_term, log_idx); }); try { @@ -506,14 +543,13 @@ void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, Keep if (!result->has_result()) result->get(); - /// If we get some errors, than send them to clients if (!result->get_accepted() || result->get_result_code() == nuraft::cmd_result_code::TIMEOUT) addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); else if (result->get_result_code() != nuraft::cmd_result_code::OK) addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - result = nullptr; requests_for_sessions.clear(); + result = nullptr; } int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms) @@ -633,8 +669,52 @@ void KeeperDispatcher::updateConfigurationThread() } } -void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx) +void KeeperDispatcher::finalizeRequest(const KeeperStorage::RequestForSession & request_for_session) { + std::lock_guard lock{unprocessed_request_mutex}; + auto unprocessed_requests_it = unprocessed_requests_for_session.find(request_for_session.session_id); + if (unprocessed_requests_it == unprocessed_requests_for_session.end()) + return; + + auto & unprocessed_requests = unprocessed_requests_it->second; + --unprocessed_requests.unprocessed_num; + + if (unprocessed_requests.unprocessed_num == 0) + { + if (!unprocessed_requests.request_queue.empty()) + { + auto & request_queue = unprocessed_requests.request_queue; + unprocessed_requests.is_read = !unprocessed_requests.is_read; + while (!request_queue.empty() && request_queue.front().request->isReadRequest() == unprocessed_requests.is_read) + { + auto & front_request = request_queue.front(); + + /// Put close requests without timeouts + if (front_request.request->getOpNum() == Coordination::OpNum::Close) + { + if 
(!requests_queue->push(std::move(front_request))) + throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); + } + else if (!requests_queue->tryPush(std::move(front_request), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) + { + throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); + } + + ++unprocessed_requests.unprocessed_num; + request_queue.pop_front(); + } + } + else + { + unprocessed_requests_for_session.erase(unprocessed_requests_it); + } + } +} + +void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) +{ + finalizeRequest(request_for_session); + const auto process_requests = [this](auto & request_queue) { for (const auto & request_info : request_queue) @@ -643,6 +723,8 @@ void KeeperDispatcher::onRequestCommit(uint64_t log_term, uint64_t log_idx) server->putLocalReadRequest(request_info); else addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); + + finalizeRequest(request_info); } request_queue.clear(); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 1f5de4007f2..f7f00f3c5f9 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -80,6 +80,18 @@ private: std::unordered_map> leader_waiters; std::mutex leader_waiter_mutex; + struct UnprocessedRequests + { + size_t unprocessed_num{0}; + bool is_read{false}; + std::list request_queue; + }; + + void finalizeRequest(const KeeperStorage::RequestForSession & request_for_session); + + std::unordered_map unprocessed_requests_for_session; + std::mutex unprocessed_request_mutex; + /// Thread put requests to raft void requestThread(); /// Thread put responses for subscribed sessions @@ -119,7 +131,7 @@ public: return server && server->checkInit(); } - void onRequestCommit(uint64_t log_term, uint64_t log_idx); + void onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx); /// Is server accepting requests, i.e. 
connected to the cluster /// and achieved quorum diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index c2bd7772b1b..2aa176423d1 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -242,7 +242,7 @@ nuraft::ptr KeeperStateMachine::commit_ext(const ext_op_params& } } - commit_callback(params.log_term, params.log_idx); + commit_callback(request_for_session, params.log_term, params.log_idx); last_committed_idx = params.log_idx; return nullptr; } diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index f5a38228c86..ceda081735a 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -19,7 +19,7 @@ using SnapshotsQueue = ConcurrentBoundedQueue; class KeeperStateMachine : public nuraft::state_machine { public: - using CommitCallback = std::function; + using CommitCallback = std::function; KeeperStateMachine( ResponsesQueue & responses_queue_, @@ -28,7 +28,7 @@ public: const CoordinationSettingsPtr & coordination_settings_, const std::string & superdigest_ = "", bool digest_enabled_ = true, - CommitCallback commit_callback_ = [](uint64_t, uint64_t){}); + CommitCallback commit_callback_ = [](const KeeperStorage::RequestForSession &, uint64_t, uint64_t){}); /// Read state from the latest snapshot void init(); From dc5cc3376962af868090e0ff8f3bedc506fec868 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 27 Jun 2022 14:22:53 +0000 Subject: [PATCH 14/74] Process outside of lock --- src/Coordination/KeeperDispatcher.cpp | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 7304a400d6a..bfd98205413 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -726,17 +726,19 @@ void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & finalizeRequest(request_info); } - - request_queue.clear(); }; - std::lock_guard lock(leader_waiter_mutex); - auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx}); - if (request_queue_it != leader_waiters.end()) + KeeperStorage::RequestsForSessions requests; { - process_requests(request_queue_it->second); - leader_waiters.erase(request_queue_it); + std::lock_guard lock(leader_waiter_mutex); + auto request_queue_it = leader_waiters.find(KeeperServer::NodeInfo{.term = log_term, .last_committed_index = log_idx}); + if (request_queue_it != leader_waiters.end()) + { + requests = std::move(request_queue_it->second); + leader_waiters.erase(request_queue_it); + } } + process_requests(requests); } bool KeeperDispatcher::isServerActive() const From a6c628cab4635f07405380477ff09f42930fb720 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 27 Jun 2022 14:34:27 +0000 Subject: [PATCH 15/74] Small fixes for queues --- src/Coordination/KeeperDispatcher.cpp | 111 ++++++++++++++---------- src/Coordination/KeeperDispatcher.h | 4 +- src/Coordination/KeeperStateMachine.cpp | 2 +- 3 files changed, 69 insertions(+), 48 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index bfd98205413..33e6ff57253 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -137,6 +137,7 @@ void KeeperDispatcher::requestThread() { prev_result = server->putRequestBatch(*current_batch); prev_batch = 
std::move(*current_batch); + current_batch->clear(); if (!read_requests.empty()) { @@ -149,7 +150,7 @@ void KeeperDispatcher::requestThread() else { prev_result = server->getLeaderInfo(); - prev_result->when_ready([&, requests_for_sessions = *current_batch](nuraft::cmd_result> & result, nuraft::ptr &) + server->getLeaderInfo()->when_ready([&, requests_for_sessions = *current_batch](nuraft::cmd_result> & result, nuraft::ptr &) mutable { auto & leader_info_ctx = result.get(); @@ -168,18 +169,23 @@ void KeeperDispatcher::requestThread() } else { - for (const auto & read_request : requests_for_sessions) + bg_read_request.scheduleOrThrow([&, requests = std::move(requests_for_sessions)] { - if (server->isLeaderAlive()) - server->putLocalReadRequest(read_request); - else - addErrorResponses({read_request}, Coordination::Error::ZCONNECTIONLOSS); - finalizeRequest(read_request); - } + for (const auto & request_info : requests) + { + if (server->isLeaderAlive()) + server->putLocalReadRequest(request_info); + else + addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); + } + + finalizeRequests(requests); + }); } }); prev_batch = std::move(*current_batch); + current_batch->clear(); if (!write_requests.empty()) { @@ -669,63 +675,76 @@ void KeeperDispatcher::updateConfigurationThread() } } -void KeeperDispatcher::finalizeRequest(const KeeperStorage::RequestForSession & request_for_session) +void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions) { - std::lock_guard lock{unprocessed_request_mutex}; - auto unprocessed_requests_it = unprocessed_requests_for_session.find(request_for_session.session_id); - if (unprocessed_requests_it == unprocessed_requests_for_session.end()) - return; + std::unordered_map counts_for_session; - auto & unprocessed_requests = unprocessed_requests_it->second; - --unprocessed_requests.unprocessed_num; - - if (unprocessed_requests.unprocessed_num == 0) + for (const auto & request_for_session : requests_for_sessions) { - if (!unprocessed_requests.request_queue.empty()) + ++counts_for_session[request_for_session.session_id]; + } + + std::lock_guard lock{unprocessed_request_mutex}; + for (const auto [session_id, count] : counts_for_session) + { + auto unprocessed_requests_it = unprocessed_requests_for_session.find(session_id); + if (unprocessed_requests_it == unprocessed_requests_for_session.end()) + continue; + + auto & unprocessed_requests = unprocessed_requests_it->second; + unprocessed_requests.unprocessed_num -= count; + + if (unprocessed_requests.unprocessed_num == 0) { - auto & request_queue = unprocessed_requests.request_queue; - unprocessed_requests.is_read = !unprocessed_requests.is_read; - while (!request_queue.empty() && request_queue.front().request->isReadRequest() == unprocessed_requests.is_read) + if (!unprocessed_requests.request_queue.empty()) { - auto & front_request = request_queue.front(); - - /// Put close requests without timeouts - if (front_request.request->getOpNum() == Coordination::OpNum::Close) + auto & request_queue = unprocessed_requests.request_queue; + unprocessed_requests.is_read = !unprocessed_requests.is_read; + while (!request_queue.empty() && request_queue.front().request->isReadRequest() == unprocessed_requests.is_read) { - if (!requests_queue->push(std::move(front_request))) - throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); - } - else if (!requests_queue->tryPush(std::move(front_request), 
configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) - { - throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); - } + auto & front_request = request_queue.front(); - ++unprocessed_requests.unprocessed_num; - request_queue.pop_front(); + /// Put close requests without timeouts + if (front_request.request->getOpNum() == Coordination::OpNum::Close) + { + if (!requests_queue->push(std::move(front_request))) + throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); + } + else if (!requests_queue->tryPush(std::move(front_request), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) + { + throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); + } + + ++unprocessed_requests.unprocessed_num; + request_queue.pop_front(); + } + } + else + { + unprocessed_requests_for_session.erase(unprocessed_requests_it); } - } - else - { - unprocessed_requests_for_session.erase(unprocessed_requests_it); } } } void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { - finalizeRequest(request_for_session); + finalizeRequests({request_for_session}); const auto process_requests = [this](auto & request_queue) { - for (const auto & request_info : request_queue) + bg_read_request.scheduleOrThrow([&, requests = std::move(request_queue)] { - if (server->isLeaderAlive()) - server->putLocalReadRequest(request_info); - else - addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); + for (const auto & request_info : requests) + { + if (server->isLeaderAlive()) + server->putLocalReadRequest(request_info); + else + addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); - finalizeRequest(request_info); - } + } + finalizeRequests(requests); + }); }; KeeperStorage::RequestsForSessions requests; diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index f7f00f3c5f9..b19a2362268 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -87,7 +87,7 @@ private: std::list request_queue; }; - void finalizeRequest(const KeeperStorage::RequestForSession & request_for_session); + void finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions); std::unordered_map unprocessed_requests_for_session; std::mutex unprocessed_request_mutex; @@ -213,6 +213,8 @@ public: { keeper_stats.reset(); } + + ThreadPool bg_read_request{1}; }; } diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index 2aa176423d1..2dc849eb721 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -242,8 +242,8 @@ nuraft::ptr KeeperStateMachine::commit_ext(const ext_op_params& } } - commit_callback(request_for_session, params.log_term, params.log_idx); last_committed_idx = params.log_idx; + commit_callback(request_for_session, params.log_term, params.log_idx); return nullptr; } From 23c705222f31c6cebc88ac646482399a64af2a23 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 28 Jun 2022 08:48:19 +0000 Subject: [PATCH 16/74] Process read requests in bg thread --- src/Coordination/KeeperDispatcher.cpp | 126 ++++++++++++++------------ src/Coordination/KeeperDispatcher.h | 8 +- src/Coordination/KeeperServer.cpp | 4 - 3 files changed, 74 insertions(+), 64 deletions(-) diff --git 
a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 33e6ff57253..ae670ea0f68 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include namespace fs = std::filesystem; @@ -23,6 +24,7 @@ namespace ErrorCodes KeeperDispatcher::KeeperDispatcher() : responses_queue(std::numeric_limits::max()) + , read_requests_queue(std::numeric_limits::max()) , configuration_and_settings(std::make_shared()) , log(&Poco::Logger::get("KeeperDispatcher")) { @@ -35,7 +37,6 @@ void KeeperDispatcher::requestThread() /// Result of requests batch from previous iteration RaftAppendResult prev_result = nullptr; - KeeperStorage::RequestsForSessions prev_batch; const auto needs_quorum = [](const auto & coordination_settings, const auto & request) { @@ -53,12 +54,6 @@ void KeeperDispatcher::requestThread() uint64_t max_wait = coordination_settings->operation_timeout_ms.totalMilliseconds(); uint64_t max_batch_size = coordination_settings->max_requests_batch_size; - /// The code below do a very simple thing: batch all write (quorum) requests into vector until - /// previous write batch is not finished or max_batch size achieved. The main complexity goes from - /// the ability to process read requests without quorum (from local state). So when we are collecting - /// requests into a batch we must check that the new request is not read request. Otherwise we have to - /// process all already accumulated write requests, wait them synchronously and only after that process - /// read request. So reads are some kind of "separator" for writes. try { KeeperStorage::RequestsForSessions * current_batch{nullptr}; @@ -116,7 +111,7 @@ void KeeperDispatcher::requestThread() else read_requests.emplace_back(request); } - else if (!prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) + else if (is_current_read || !prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) break; if (shutdown_called) @@ -127,16 +122,23 @@ void KeeperDispatcher::requestThread() break; /// Forcefully process all previous pending requests - if (prev_result) - forceWaitAndProcessResult(prev_result, prev_batch); + if (!is_current_read && prev_result) + forceWaitAndProcessResult(prev_result); - if (current_batch && !current_batch->empty()) + if (current_batch) { LOG_INFO(&Poco::Logger::get("BATCHING"), "Processing {} batch of {}", is_current_read, current_batch->size()); if (!is_current_read) { prev_result = server->putRequestBatch(*current_batch); - prev_batch = std::move(*current_batch); + prev_result->when_ready([&, requests_for_sessions = std::move(*current_batch)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + }); + current_batch->clear(); if (!read_requests.empty()) @@ -149,9 +151,13 @@ void KeeperDispatcher::requestThread() } else { - prev_result = server->getLeaderInfo(); - server->getLeaderInfo()->when_ready([&, requests_for_sessions = *current_batch](nuraft::cmd_result> & result, nuraft::ptr &) mutable + server->getLeaderInfo()->when_ready([&, requests_for_sessions = 
std::move(*current_batch)](nuraft::cmd_result> & result, nuraft::ptr &) mutable { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + auto & leader_info_ctx = result.get(); KeeperServer::NodeInfo leader_info; @@ -163,28 +169,14 @@ void KeeperDispatcher::requestThread() if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) { - auto & leader_waiter = leader_waiters[node_info]; + auto & leader_waiter = leader_waiters[leader_info]; leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); } - else - { - bg_read_request.scheduleOrThrow([&, requests = std::move(requests_for_sessions)] - { - for (const auto & request_info : requests) - { - if (server->isLeaderAlive()) - server->putLocalReadRequest(request_info); - else - addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); - } - - finalizeRequests(requests); - }); - } + else if (!read_requests_queue.push(std::move(requests_for_sessions))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); }); - prev_batch = std::move(*current_batch); current_batch->clear(); if (!write_requests.empty()) @@ -254,6 +246,37 @@ void KeeperDispatcher::snapshotThread() } } +void KeeperDispatcher::readRequestThread() +{ + setThreadName("KeeperReadT"); + while (!shutdown_called) + { + KeeperStorage::RequestsForSessions requests; + if (!read_requests_queue.pop(requests)) + break; + + if (shutdown_called) + break; + + try + { + for (const auto & request_info : requests) + { + if (server->isLeaderAlive()) + server->putLocalReadRequest(request_info); + else + addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); + } + + finalizeRequests(requests); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } +} + void KeeperDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response) { std::lock_guard lock(session_to_response_callback_mutex); @@ -301,6 +324,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ return false; } + LOG_INFO(&Poco::Logger::get("BATCHING"), "Got request {}", request->getOpNum()); KeeperStorage::RequestForSession request_info; request_info.request = request; using namespace std::chrono; @@ -325,8 +349,10 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ unprocessed_requests.request_queue.push_back(std::move(request_info)); return true; } - - ++unprocessed_requests.unprocessed_num; + else + { + ++unprocessed_requests.unprocessed_num; + } } } @@ -358,6 +384,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf request_thread = ThreadFromGlobalPool([this] { requestThread(); }); responses_thread = ThreadFromGlobalPool([this] { responseThread(); }); snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); }); + read_request_thread = ThreadFromGlobalPool([this] { readRequestThread(); }); server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { onRequestCommit(request_for_session, log_term, log_idx); }); @@ -423,6 +450,10 @@ void KeeperDispatcher::shutdown() if (snapshot_thread.joinable()) snapshot_thread.join(); + read_requests_queue.finish(); + if (read_request_thread.joinable()) + read_request_thread.join(); + update_configuration_queue.finish(); if (update_configuration_thread.joinable()) update_configuration_thread.join(); @@ -544,17 +575,13 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession } } -void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions) +void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result) { + LOG_INFO(&Poco::Logger::get("TESTER"), "Waiting for result"); if (!result->has_result()) result->get(); - if (!result->get_accepted() || result->get_result_code() == nuraft::cmd_result_code::TIMEOUT) - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - else if (result->get_result_code() != nuraft::cmd_result_code::OK) - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - - requests_for_sessions.clear(); + LOG_INFO(&Poco::Logger::get("TESTER"), "Got result"); result = nullptr; } @@ -731,22 +758,6 @@ void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & { finalizeRequests({request_for_session}); - const auto process_requests = [this](auto & request_queue) - { - bg_read_request.scheduleOrThrow([&, requests = std::move(request_queue)] - { - for (const auto & request_info : requests) - { - if (server->isLeaderAlive()) - server->putLocalReadRequest(request_info); - else - addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); - - } - finalizeRequests(requests); - }); - }; - KeeperStorage::RequestsForSessions requests; { std::lock_guard lock(leader_waiter_mutex); @@ -757,7 +768,8 @@ void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & leader_waiters.erase(request_queue_it); } } - process_requests(requests); + if (!read_requests_queue.push(std::move(requests))) + 
throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); } bool KeeperDispatcher::isServerActive() const diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index b19a2362268..50b842d73b4 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -35,6 +35,7 @@ private: std::unique_ptr requests_queue; ResponsesQueue responses_queue; SnapshotsQueue snapshots_queue{1}; + ConcurrentBoundedQueue read_requests_queue; /// More than 1k updates is definitely misconfiguration. UpdateConfigurationQueue update_configuration_queue{1000}; @@ -64,6 +65,7 @@ private: ThreadFromGlobalPool snapshot_thread; /// Apply or wait for configuration changes ThreadFromGlobalPool update_configuration_thread; + ThreadFromGlobalPool read_request_thread; /// RAFT wrapper. std::unique_ptr server; @@ -103,6 +105,8 @@ private: /// Thread apply or wait configuration changes from leader void updateConfigurationThread(); + void readRequestThread(); + void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response); /// Add error responses for requests to responses queue. @@ -111,7 +115,7 @@ private: /// Forcefully wait for result and sets errors if something went wrong. /// Clears both arguments - void forceWaitAndProcessResult(RaftAppendResult & result, KeeperStorage::RequestsForSessions & requests_for_sessions); + static void forceWaitAndProcessResult(RaftAppendResult & result); public: /// Just allocate some objects, real initialization is done by `initialize` method @@ -213,8 +217,6 @@ public: { keeper_stats.reset(); } - - ThreadPool bg_read_request{1}; }; } diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 0a7b1948495..b3fc8a11516 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -169,10 +169,6 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server ( 0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull ) ; auto result = send_msg_to_leader(req); - - if (!result->has_result()) - result->get(); - return result; } From 6d7050cb42cbdf658cd5e4c5ec141d0cd8f5c25e Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 28 Jun 2022 09:54:16 +0000 Subject: [PATCH 17/74] Additional refactoring for request processing --- src/Coordination/KeeperDispatcher.cpp | 159 ++++++++++---------------- src/Coordination/KeeperServer.cpp | 3 +- 2 files changed, 63 insertions(+), 99 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index ae670ea0f68..edc04c03d81 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -46,6 +46,53 @@ void KeeperDispatcher::requestThread() KeeperStorage::RequestsForSessions write_requests; KeeperStorage::RequestsForSessions read_requests; + auto process_read_requests = [&, this]() mutable + { + server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + + auto & leader_info_ctx = result.get(); + + KeeperServer::NodeInfo leader_info; + leader_info.term = leader_info_ctx->get_ulong(); +
leader_info.last_committed_index = leader_info_ctx->get_ulong(); + + std::lock_guard lock(leader_waiter_mutex); + auto node_info = server->getNodeInfo(); + + if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) + { + auto & leader_waiter = leader_waiters[leader_info]; + leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); + LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); + } + else if (!read_requests_queue.push(std::move(requests_for_sessions))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); + }); + read_requests.clear(); + }; + + auto process_write_requests = [&, this]() mutable + { + /// Forcefully process all previous pending requests + if (prev_result) + forceWaitAndProcessResult(prev_result); + + prev_result = server->putRequestBatch(write_requests); + prev_result->when_ready([&, requests_for_sessions = std::move(write_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + }); + write_requests.clear(); + }; + while (!shutdown_called) { KeeperStorage::RequestForSession request; @@ -56,33 +103,16 @@ try { - KeeperStorage::RequestsForSessions * current_batch{nullptr}; - bool is_current_read = false; - if (requests_queue->tryPop(request, max_wait)) { + if (shutdown_called) break; + if (needs_quorum(coordination_settings, request)) - { write_requests.emplace_back(request); - - if (!current_batch) - { - current_batch = &write_requests; - is_current_read = false; - } - } else - { read_requests.emplace_back(request); - if (!current_batch) - { - current_batch = &read_requests; - is_current_read = true; - } - } /// Waiting until the previous append succeeds, or the batch is big enough /// has_result == false && get_result_code == OK means that our request is still not processed.
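(Annotation, not part of the patch: the read path in the hunk above is a ReadIndex-style protocol. The node asks the leader for its current term and last committed log index, and may answer reads from local state only once its own state machine has caught up to that point; otherwise the batch is parked in leader_waiters and flushed later by the commit callback. Below is a minimal, compilable sketch of that decision under simplified assumptions: NodeInfo, Request, and tryServeLocally are hypothetical stand-ins, not the real ClickHouse types, and the real waiter map is an unordered_map with a custom hash.)

    /// Sketch only: simplified stand-ins for KeeperServer::NodeInfo and
    /// KeeperStorage::RequestForSession.
    #include <cstdint>
    #include <map>
    #include <tuple>
    #include <vector>

    struct NodeInfo
    {
        uint64_t term = 0;
        uint64_t last_committed_index = 0;

        bool operator<(const NodeInfo & rhs) const
        {
            return std::tie(term, last_committed_index)
                < std::tie(rhs.term, rhs.last_committed_index);
        }
    };

    using Request = int; /// stand-in for KeeperStorage::RequestForSession
    static std::map<NodeInfo, std::vector<Request>> leader_waiters;

    /// Returns true if the batch of reads may be served from local state now.
    static bool tryServeLocally(const NodeInfo & local, const NodeInfo & leader, const std::vector<Request> & batch)
    {
        /// Linearizability holds only if this replica has applied everything
        /// the leader had committed when it answered the status request.
        if (local.term < leader.term || local.last_committed_index < leader.last_committed_index)
        {
            /// Park the batch; the commit callback flushes it once the local
            /// state machine reaches the leader's (term, index) pair.
            auto & waiters = leader_waiters[leader];
            waiters.insert(waiters.end(), batch.begin(), batch.end());
            return false;
        }
        return true;
    }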
@@ -91,15 +121,13 @@ void KeeperDispatcher::requestThread() { if (write_requests.size() > max_batch_size) { - current_batch = &write_requests; - is_current_read = false; + process_write_requests(); break; } if (read_requests.size() > max_batch_size) { - current_batch = &read_requests; - is_current_read = true; + process_read_requests(); break; } @@ -111,8 +139,14 @@ void KeeperDispatcher::requestThread() else read_requests.emplace_back(request); } - else if (is_current_read || !prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) - break; + else + { + if (!read_requests.empty()) + process_read_requests(); + + if (!prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) + break; + } if (shutdown_called) break; @@ -121,73 +155,9 @@ void KeeperDispatcher::requestThread() if (shutdown_called) break; - /// Forcefully process all previous pending requests - if (!is_current_read && prev_result) - forceWaitAndProcessResult(prev_result); + if (!write_requests.empty()) + process_write_requests(); - if (current_batch) - { - LOG_INFO(&Poco::Logger::get("BATCHING"), "Processing {} batch of {}", is_current_read, current_batch->size()); - if (!is_current_read) - { - prev_result = server->putRequestBatch(*current_batch); - prev_result->when_ready([&, requests_for_sessions = std::move(*current_batch)](nuraft::cmd_result> & result, nuraft::ptr &) mutable - { - if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - else if (result.get_result_code() != nuraft::cmd_result_code::OK) - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - }); - - current_batch->clear(); - - if (!read_requests.empty()) - { - current_batch = &read_requests; - is_current_read = true; - } - else - current_batch = nullptr; - } - else - { - server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(*current_batch)](nuraft::cmd_result> & result, nuraft::ptr &) mutable - { - if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - else if (result.get_result_code() != nuraft::cmd_result_code::OK) - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - - auto & leader_info_ctx = result.get(); - - KeeperServer::NodeInfo leader_info; - leader_info.term = leader_info_ctx->get_ulong(); - leader_info.last_committed_index = leader_info_ctx->get_ulong(); - - std::lock_guard lock(leader_waiter_mutex); - auto node_info = server->getNodeInfo(); - - if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) - { - auto & leader_waiter = leader_waiters[leader_info]; - leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); - LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); - } - else if (!read_requests_queue.push(std::move(requests_for_sessions))) - throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); - }); - - current_batch->clear(); - - if (!write_requests.empty()) - { - current_batch = &write_requests; - is_current_read = false; - } - else - current_batch = nullptr; - } - } } } catch (...) 
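(Annotation, not part of the patch: both flush paths above report failures through the same mapping from nuraft command results to ZooKeeper client errors, and the write path first drains the previous in-flight append via forceWaitAndProcessResult so that at most one Raft batch is outstanding. A small compilable sketch of that mapping, with nuraft::cmd_result reduced to a hypothetical plain struct:)

    /// Sketch only: CmdResult is a hypothetical stand-in for nuraft::cmd_result.
    enum class CmdResultCode { OK, TIMEOUT, FAILED };

    struct CmdResult
    {
        bool accepted = false;
        CmdResultCode code = CmdResultCode::FAILED;
    };

    enum class KeeperError { None, OperationTimeout, ConnectionLoss };

    /// Mirrors the handlers in process_read_requests / process_write_requests.
    static KeeperError classify(const CmdResult & result)
    {
        if (!result.accepted || result.code == CmdResultCode::TIMEOUT)
            return KeeperError::OperationTimeout; /// -> Error::ZOPERATIONTIMEOUT
        if (result.code != CmdResultCode::OK)
            return KeeperError::ConnectionLoss; /// -> Error::ZCONNECTIONLOSS
        return KeeperError::None; /// command succeeded
    }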
@@ -324,7 +294,6 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ return false; } - LOG_INFO(&Poco::Logger::get("BATCHING"), "Got request {}", request->getOpNum()); KeeperStorage::RequestForSession request_info; request_info.request = request; using namespace std::chrono; @@ -349,10 +318,8 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ unprocessed_requests.request_queue.push_back(std::move(request_info)); return true; } - else - { - ++unprocessed_requests.unprocessed_num; - } + + ++unprocessed_requests.unprocessed_num; } } @@ -577,11 +544,9 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result) { - LOG_INFO(&Poco::Logger::get("TESTER"), "Waiting for result"); if (!result->has_result()) result->get(); - LOG_INFO(&Poco::Logger::get("TESTER"), "Got result"); result = nullptr; } diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index b3fc8a11516..ae1164a65e9 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -168,8 +168,7 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server nuraft::ptr req = nuraft::cs_new ( 0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull ) ; - auto result = send_msg_to_leader(req); - return result; + return send_msg_to_leader(req); } using nuraft::raft_server::raft_server; From 73000a042d641e8bf02183e8b292d3dad13cd115 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 28 Jun 2022 13:44:30 +0000 Subject: [PATCH 18/74] Don't reconnect manually --- src/Coordination/KeeperDispatcher.cpp | 6 ++++++ .../src/jepsen/clickhouse_keeper/utils.clj | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index edc04c03d81..f8ff21328f7 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -51,9 +51,15 @@ void KeeperDispatcher::requestThread() server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable { if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + { addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + return; + } else if (result.get_result_code() != nuraft::cmd_result_code::OK) + { addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } auto & leader_info_ctx = result.get(); diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj index 0457ff6eae2..3e192f66fb2 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/utils.clj @@ -45,7 +45,7 @@ (defn zk-connect [host port timeout] - (exec-with-retries 30 (fn [] (zk/connect (str host ":" port) :timeout-msec timeout)))) + (zk/connect (str host ":" port) :timeout-msec timeout)) (defn zk-create-range [conn n] From 77144322d7693b643ac2bd09d378ae223b6559c6 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 29 Jun 2022 07:39:39 +0000 Subject: [PATCH 19/74] Add extra checks for read result --- src/Coordination/KeeperDispatcher.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git 
a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index f8ff21328f7..65f71d22a95 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -48,7 +48,7 @@ void KeeperDispatcher::requestThread() auto process_read_requests = [&, this]() mutable { - server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr & exception) mutable { if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) { @@ -60,9 +60,21 @@ void KeeperDispatcher::requestThread() addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); return; } + else if (exception) + { + LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what()); + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } auto & leader_info_ctx = result.get(); + if (!leader_info_ctx) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } + KeeperServer::NodeInfo leader_info; leader_info.term = leader_info_ctx->get_ulong(); leader_info.last_committed_index = leader_info_ctx->get_ulong(); From 6fd24b58e764e495957179c47f5fd94c3bc5ea95 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 29 Jun 2022 13:59:25 +0000 Subject: [PATCH 20/74] Revert "Allow only jepsen test" This reverts commit 197ae25aa3ec53ff2affc4e61a42cff438e3f9e3. --- .github/workflows/pull_request.yml | 3348 +++++++++++++++++++++++++++- 1 file changed, 3347 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index f021554dd7b..79d54d77f06 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -109,11 +109,196 @@ jobs: with: name: changed_images path: ${{ runner.temp }}/changed_images.json + StyleCheck: + needs: DockerHubPush + runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{ runner.temp }}/style_check + EOF + - name: Download changed images + # even if artifact does not exist, e.g. 
on `do not test` label or failed Docker job + continue-on-error: true + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.TEMP_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Style Check + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 style_check.py + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FastTest: + needs: DockerHubPush + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/fasttest + REPO_COPY=${{runner.temp}}/fasttest/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + EOF + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" + mkdir "$GITHUB_WORKSPACE" + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.TEMP_PATH }} + - name: Fast Test + run: | + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 fast_test_check.py + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + CompatibilityCheck: + needs: [BuilderDebRelease] + runs-on: [self-hosted, style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/compatibility_check + REPO_COPY=${{runner.temp}}/compatibility_check/ClickHouse + REPORTS_PATH=${{runner.temp}}/reports_dir + EOF + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: CompatibilityCheck + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 compatibility_check.py + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + SplitBuildSmokeTest: + needs: [BuilderDebSplitted] + runs-on: [self-hosted, style-checker] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/split_build_check + REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse + REPORTS_PATH=${{runner.temp}}/reports_dir + EOF + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Split build check + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" 
######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### + BuilderDebRelease: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_release + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # for performance artifact + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" BuilderBinRelease: - needs: [DockerHubPush] + needs: [DockerHubPush, FastTest] runs-on: [self-hosted, builder] steps: - name: Set envs @@ -157,9 +342,3170 @@ jobs: # shellcheck disable=SC2046 docker rm -f $(docker ps -a -q) ||: sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + # BuilderBinGCC: + # needs: [DockerHubPush, FastTest] + # runs-on: [self-hosted, builder] + # steps: + # - name: Set envs + # run: | + # cat >> "$GITHUB_ENV" << 'EOF' + # TEMP_PATH=${{runner.temp}}/build_check + # IMAGES_PATH=${{runner.temp}}/images_path + # REPO_COPY=${{runner.temp}}/build_check/ClickHouse + # CACHES_PATH=${{runner.temp}}/../ccaches + # BUILD_NAME=binary_gcc + # EOF + # - name: Download changed images + # uses: actions/download-artifact@v2 + # with: + # name: changed_images + # path: ${{ runner.temp }}/images_path + # - name: Clear repository + # run: | + # sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + # - name: Check out repository code + # uses: actions/checkout@v2 + # - name: Build + # run: | + # git -C "$GITHUB_WORKSPACE" submodule sync --recursive + # git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + # sudo rm -fr "$TEMP_PATH" + # mkdir -p "$TEMP_PATH" + # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + # cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + # - name: Upload build URLs to artifacts + # if: ${{ success() || failure() }} + # uses: actions/upload-artifact@v2 + # with: + # name: ${{ env.BUILD_URLS }} + # path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + # - name: Cleanup + # if: always() + # run: | + # # shellcheck disable=SC2046 + # docker kill $(docker ps -q) ||: + # # shellcheck disable=SC2046 + # docker rm -f $(docker ps -a -q) ||: + # sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebAarch64: + needs: [DockerHubPush, FastTest] + 
runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_aarch64 + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ runner.temp }}/images_path + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # for performance artifact + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebAsan: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_asan + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebUBsan: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_ubsan + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule 
sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebTsan: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_tsan + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebMsan: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_msan + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderDebDebug: + needs: [DockerHubPush, FastTest] 
+ runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=package_debug + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" +########################################################################################## +##################################### SPECIAL BUILDS ##################################### +########################################################################################## + BuilderDebSplitted: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_splitted + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinTidy: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_tidy + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - 
name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinDarwin: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_darwin + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinAarch64: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_aarch64 + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + 
run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinFreeBSD: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_freebsd + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinDarwinAarch64: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_darwin_aarch64 + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" + BuilderBinPPC64: + needs: [DockerHubPush, FastTest] + runs-on: [self-hosted, builder] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/build_check + IMAGES_PATH=${{runner.temp}}/images_path + REPO_COPY=${{runner.temp}}/build_check/ClickHouse + CACHES_PATH=${{runner.temp}}/../ccaches + BUILD_NAME=binary_ppc64le + EOF + - name: Download changed images + uses: actions/download-artifact@v2 + with: + name: changed_images + path: ${{ env.IMAGES_PATH }} + - name: Clear repository + 
run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Build + run: | + git -C "$GITHUB_WORKSPACE" submodule sync --recursive + git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10 + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts + if: ${{ success() || failure() }} + uses: actions/upload-artifact@v2 + with: + name: ${{ env.BUILD_URLS }} + path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" "$CACHES_PATH" +############################################################################################ +##################################### Docker images ####################################### +############################################################################################ + DockerServerImages: + needs: + - BuilderDebRelease + - BuilderDebAarch64 + runs-on: [self-hosted, style-checker] + steps: + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + with: + fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself + - name: Check docker clickhouse/clickhouse-server building + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 docker_server.py --release-type head --no-push + python3 docker_server.py --release-type head --no-push --no-ubuntu \ + --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################ +##################################### BUILD REPORTER ####################################### +############################################################################################ + BuilderReport: + needs: + - BuilderBinRelease + - BuilderDebAarch64 + - BuilderDebAsan + - BuilderDebDebug + - BuilderDebMsan + - BuilderDebRelease + - BuilderDebTsan + - BuilderDebUBsan + runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + CHECK_NAME=ClickHouse build check (actions) + REPORTS_PATH=${{runner.temp}}/reports_dir + TEMP_PATH=${{runner.temp}}/report_check + NEEDS_DATA_PATH=${{runner.temp}}/needs.json + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Report Builder + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cat > "$NEEDS_DATA_PATH" << 'EOF' + ${{ toJSON(needs) }} + EOF + cd "$GITHUB_WORKSPACE/tests/ci" + python3 build_report_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + 
BuilderSpecialReport: + needs: + - BuilderBinAarch64 + - BuilderBinDarwin + - BuilderBinDarwinAarch64 + - BuilderBinFreeBSD + # - BuilderBinGCC + - BuilderBinPPC64 + - BuilderBinTidy + - BuilderDebSplitted + runs-on: [self-hosted, style-checker] + if: ${{ success() || failure() }} + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/report_check + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=ClickHouse special build check (actions) + NEEDS_DATA_PATH=${{runner.temp}}/needs.json + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Report Builder + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cat > "$NEEDS_DATA_PATH" << 'EOF' + ${{ toJSON(needs) }} + EOF + cd "$GITHUB_WORKSPACE/tests/ci" + python3 build_report_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################## +########################### FUNCTIONAl STATELESS TESTS ####################################### +############################################################################################## + FunctionalStatelessTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (release, actions) + REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse + KILL_TIMEOUT=10800 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestReleaseDatabaseReplicated0: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_database_replicated + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (release, DatabaseReplicated, actions) + REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=2 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 
functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestReleaseDatabaseReplicated1: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_database_replicated + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (release, DatabaseReplicated, actions) + REPO_COPY=${{runner.temp}}/stateless_database_replicated/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=2 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestReleaseWideParts: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_wide_parts + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (release, wide parts enabled, actions) + REPO_COPY=${{runner.temp}}/stateless_wide_parts/ClickHouse + KILL_TIMEOUT=10800 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestReleaseS3: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_s3_storage + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (release, s3 storage, actions) + REPO_COPY=${{runner.temp}}/stateless_s3_storage/ClickHouse + KILL_TIMEOUT=10800 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker 
kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestAarch64: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (aarch64, actions) + REPO_COPY=${{runner.temp}}/stateless_release/ClickHouse + KILL_TIMEOUT=10800 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestAsan0: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (address, actions) + REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=2 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestAsan1: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (address, actions) + REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=2 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestTsan0: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + 
steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (thread, actions) + REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestTsan1: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (thread, actions) + REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestTsan2: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (thread, actions) + REPO_COPY=${{runner.temp}}/stateless_tsan/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_ubsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (ubsan, actions) + 
REPO_COPY=${{runner.temp}}/stateless_ubsan/ClickHouse + KILL_TIMEOUT=10800 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestMsan0: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_memory + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (memory, actions) + REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestMsan1: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_memory + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (memory, actions) + REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestMsan2: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_memory + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (memory, actions) + REPO_COPY=${{runner.temp}}/stateless_memory/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - 
name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestDebug0: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (debug, actions) + REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestDebug1: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (debug, actions) + REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestDebug2: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests (debug, actions) + REPO_COPY=${{runner.temp}}/stateless_debug/ClickHouse + KILL_TIMEOUT=10800 + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + 
sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatelessTestFlakyCheck: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateless_flaky_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateless tests flaky check (address, actions) + REPO_COPY=${{runner.temp}}/stateless_flaky_asan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + TestsBugfixCheck: + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/tests_bugfix_check + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Tests bugfix validate check (actions) + KILL_TIMEOUT=3600 + REPO_COPY=${{runner.temp}}/tests_bugfix_check/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Bugfix test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + + TEMP_PATH="${TEMP_PATH}/integration" \ + REPORTS_PATH="${REPORTS_PATH}/integration" \ + python3 integration_test_check.py "Integration tests bugfix validate check" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + + TEMP_PATH="${TEMP_PATH}/stateless" \ + REPORTS_PATH="${REPORTS_PATH}/stateless" \ + python3 functional_test_check.py "Stateless tests bugfix validate check" "$KILL_TIMEOUT" \ + --validate-bugfix --post-commit-status=file || echo 'ignore exit code' + + python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################## +############################ FUNCTIONAl STATEFUL TESTS ####################################### +############################################################################################## + FunctionalStatefulTestRelease: + needs: [BuilderDebRelease] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + 
TEMP_PATH=${{runner.temp}}/stateful_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (release, actions) + REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestAarch64: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (aarch64, actions) + REPO_COPY=${{runner.temp}}/stateful_release/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (address, actions) + REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (thread, actions) + REPO_COPY=${{runner.temp}}/stateful_tsan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} 
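A note on the TestsBugfixCheck job shown earlier: it is the one check where failing tests are the desired outcome, since a bugfix PR is expected to ship tests that fail on the unfixed binary. Both `--validate-bugfix` runs therefore discard their exit codes and only write per-suite status files, which bugfix_validate_check.py then combines. A hedged Python sketch of that final combination step follows; the TSV layout and the exact acceptance rule are assumptions for illustration, not the script's actual contents:

import csv
import sys

def has_failures(path):
    """Return True if a post-commit status TSV reports any failed check.

    Assumed layout: one row per check, first column is a status string.
    """
    try:
        with open(path) as f:
            return any(row and row[0] == "failure"
                       for row in csv.reader(f, delimiter="\t"))
    except FileNotFoundError:
        return False

def main(status_files):
    # Assumed polarity: validation passes when at least one suite shows
    # the PR's new tests failing against the unfixed build.
    ok = any(has_failures(p) for p in status_files)
    return 0 if ok else 1

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))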
+ - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_msan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (memory, actions) + REPO_COPY=${{runner.temp}}/stateful_msan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_ubsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (ubsan, actions) + REPO_COPY=${{runner.temp}}/stateful_ubsan/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + FunctionalStatefulTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stateful_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stateful tests (debug, actions) + REPO_COPY=${{runner.temp}}/stateful_debug/ClickHouse + KILL_TIMEOUT=3600 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Functional test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 
functional_test_check.py "$CHECK_NAME" "$KILL_TIMEOUT" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################## +######################################### STRESS TESTS ####################################### +############################################################################################## + StressTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stress_thread + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stress test (address, actions) + REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + StressTestTsan: + needs: [BuilderDebTsan] + # func testers have 16 cores + 128 GB memory + # while stress testers have 36 cores + 72 GB memory + # It would be better to have something like 32 + 128, + # but such servers are almost unavailable as spot instances. 
+ runs-on: [self-hosted, func-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stress_thread + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stress test (thread, actions) + REPO_COPY=${{runner.temp}}/stress_thread/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + StressTestMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stress_memory + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stress test (memory, actions) + REPO_COPY=${{runner.temp}}/stress_memory/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + StressTestUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stress_undefined + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stress test (undefined, actions) + REPO_COPY=${{runner.temp}}/stress_undefined/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Stress test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + StressTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/stress_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Stress test (debug, actions) + REPO_COPY=${{runner.temp}}/stress_debug/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out 
repository code + uses: actions/checkout@v2 + - name: Stress test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 stress_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################## +##################################### AST FUZZERS ############################################ +############################################################################################## + ASTFuzzerTestAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/ast_fuzzer_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=AST fuzzer (ASan, actions) + REPO_COPY=${{runner.temp}}/ast_fuzzer_asan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + ASTFuzzerTestTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/ast_fuzzer_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=AST fuzzer (TSan, actions) + REPO_COPY=${{runner.temp}}/ast_fuzzer_tsan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + ASTFuzzerTestUBSan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/ast_fuzzer_ubsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=AST fuzzer (UBSan, actions) + REPO_COPY=${{runner.temp}}/ast_fuzzer_ubsan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py 
"$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + ASTFuzzerTestMSan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/ast_fuzzer_msan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=AST fuzzer (MSan, actions) + REPO_COPY=${{runner.temp}}/ast_fuzzer_msan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + ASTFuzzerTestDebug: + needs: [BuilderDebDebug] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/ast_fuzzer_debug + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=AST fuzzer (debug, actions) + REPO_COPY=${{runner.temp}}/ast_fuzzer_debug/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Fuzzer + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 ast_fuzzer_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################# +############################# INTEGRATION TESTS ############################################# +############################################################################################# + IntegrationTestsAsan0: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (asan, actions) + REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck 
disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsAsan1: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (asan, actions) + REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsAsan2: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (asan, actions) + REPO_COPY=${{runner.temp}}/integration_tests_asan/ClickHouse + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=3 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsTsan0: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (thread, actions) + REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsTsan1: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + 
TEMP_PATH=${{runner.temp}}/integration_tests_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (thread, actions) + REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsTsan2: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (thread, actions) + REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsTsan3: + needs: [BuilderDebTsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (thread, actions) + REPO_COPY=${{runner.temp}}/integration_tests_tsan/ClickHouse + RUN_BY_HASH_NUM=3 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsRelease0: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (release, actions) + REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=2 + EOF + - 
name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsRelease1: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_release + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests (release, actions) + REPO_COPY=${{runner.temp}}/integration_tests_release/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=2 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + IntegrationTestsFlakyCheck: + needs: [BuilderDebAsan] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/integration_tests_asan_flaky_check + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Integration tests flaky check (asan, actions) + REPO_COPY=${{runner.temp}}/integration_tests_asan_flaky_check/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Integration test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 integration_test_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################# +#################################### UNIT TESTS ############################################# +############################################################################################# + UnitTestsAsan: + needs: [BuilderDebAsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/unit_tests_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Unit tests (asan, actions) + REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse + EOF + - name: Download json reports + uses: 
actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + UnitTestsReleaseClang: + needs: [BuilderBinRelease] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/unit_tests_asan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Unit tests (release-clang, actions) + REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + # UnitTestsReleaseGCC: + # needs: [BuilderBinGCC] + # runs-on: [self-hosted, fuzzer-unit-tester] + # steps: + # - name: Set envs + # run: | + # cat >> "$GITHUB_ENV" << 'EOF' + # TEMP_PATH=${{runner.temp}}/unit_tests_asan + # REPORTS_PATH=${{runner.temp}}/reports_dir + # CHECK_NAME=Unit tests (release-gcc, actions) + # REPO_COPY=${{runner.temp}}/unit_tests_asan/ClickHouse + # EOF + # - name: Download json reports + # uses: actions/download-artifact@v2 + # with: + # path: ${{ env.REPORTS_PATH }} + # - name: Clear repository + # run: | + # sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + # - name: Check out repository code + # uses: actions/checkout@v2 + # - name: Unit test + # run: | + # sudo rm -fr "$TEMP_PATH" + # mkdir -p "$TEMP_PATH" + # cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + # cd "$REPO_COPY/tests/ci" + # python3 unit_tests_check.py "$CHECK_NAME" + # - name: Cleanup + # if: always() + # run: | + # # shellcheck disable=SC2046 + # docker kill $(docker ps -q) ||: + # # shellcheck disable=SC2046 + # docker rm -f $(docker ps -a -q) ||: + # sudo rm -fr "$TEMP_PATH" + UnitTestsTsan: + needs: [BuilderDebTsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/unit_tests_tsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Unit tests (tsan, actions) + REPO_COPY=${{runner.temp}}/unit_tests_tsan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py 
"$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + UnitTestsMsan: + needs: [BuilderDebMsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/unit_tests_msan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Unit tests (msan, actions) + REPO_COPY=${{runner.temp}}/unit_tests_msan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + UnitTestsUBsan: + needs: [BuilderDebUBsan] + runs-on: [self-hosted, fuzzer-unit-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/unit_tests_ubsan + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Unit tests (ubsan, actions) + REPO_COPY=${{runner.temp}}/unit_tests_ubsan/ClickHouse + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Unit test + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 unit_tests_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" +############################################################################################# +#################################### PERFORMANCE TESTS ###################################### +############################################################################################# + PerformanceComparison0: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck 
disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparison1: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparison2: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparison3: + needs: [BuilderDebRelease] + runs-on: [self-hosted, stress-tester] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=3 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch0: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + 
TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=0 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch1: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=1 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch2: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + RUN_BY_HASH_NUM=2 + RUN_BY_HASH_TOTAL=4 + EOF + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Performance Comparison + run: | + sudo rm -fr "$TEMP_PATH" + mkdir -p "$TEMP_PATH" + cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH" + cd "$REPO_COPY/tests/ci" + python3 performance_comparison_check.py "$CHECK_NAME" + - name: Cleanup + if: always() + run: | + # shellcheck disable=SC2046 + docker kill $(docker ps -q) ||: + # shellcheck disable=SC2046 + docker rm -f $(docker ps -a -q) ||: + sudo rm -fr "$TEMP_PATH" + PerformanceComparisonAarch3: + needs: [BuilderDebAarch64] + runs-on: [self-hosted, func-tester-aarch64] + steps: + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/performance_comparison + REPORTS_PATH=${{runner.temp}}/reports_dir + CHECK_NAME=Performance Comparison Aarch64 + REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse + 
RUN_BY_HASH_NUM=3
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Performance Comparison
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 performance_comparison_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          # shellcheck disable=SC2046
+          docker kill $(docker ps -q) ||:
+          # shellcheck disable=SC2046
+          docker rm -f $(docker ps -a -q) ||:
+          sudo rm -fr "$TEMP_PATH"
 #############################################################################################
 ###################################### JEPSEN TESTS #########################################
 #############################################################################################
   Jepsen:
     needs: [BuilderBinRelease]
     uses: ./.github/workflows/jepsen.yml
+
+  FinishCheck:
+    needs:
+      - StyleCheck
+      - DockerHubPush
+      - DockerServerImages
+      - CheckLabels
+      - BuilderReport
+      - FastTest
+      - FunctionalStatelessTestDebug0
+      - FunctionalStatelessTestDebug1
+      - FunctionalStatelessTestDebug2
+      - FunctionalStatelessTestRelease
+      - FunctionalStatelessTestReleaseDatabaseReplicated0
+      - FunctionalStatelessTestReleaseDatabaseReplicated1
+      - FunctionalStatelessTestReleaseWideParts
+      - FunctionalStatelessTestAarch64
+      - FunctionalStatelessTestAsan0
+      - FunctionalStatelessTestAsan1
+      - FunctionalStatelessTestTsan0
+      - FunctionalStatelessTestTsan1
+      - FunctionalStatelessTestTsan2
+      - FunctionalStatelessTestMsan0
+      - FunctionalStatelessTestMsan1
+      - FunctionalStatelessTestMsan2
+      - FunctionalStatelessTestUBsan
+      - FunctionalStatefulTestDebug
+      - FunctionalStatefulTestRelease
+      - FunctionalStatefulTestAarch64
+      - FunctionalStatefulTestAsan
+      - FunctionalStatefulTestTsan
+      - FunctionalStatefulTestMsan
+      - FunctionalStatefulTestUBsan
+      - FunctionalStatelessTestReleaseS3
+      - StressTestDebug
+      - StressTestAsan
+      - StressTestTsan
+      - StressTestMsan
+      - StressTestUBsan
+      - ASTFuzzerTestDebug
+      - ASTFuzzerTestAsan
+      - ASTFuzzerTestTsan
+      - ASTFuzzerTestMSan
+      - ASTFuzzerTestUBSan
+      - IntegrationTestsAsan0
+      - IntegrationTestsAsan1
+      - IntegrationTestsAsan2
+      - IntegrationTestsRelease0
+      - IntegrationTestsRelease1
+      - IntegrationTestsTsan0
+      - IntegrationTestsTsan1
+      - IntegrationTestsTsan2
+      - IntegrationTestsTsan3
+      - PerformanceComparison0
+      - PerformanceComparison1
+      - PerformanceComparison2
+      - PerformanceComparison3
+      - PerformanceComparisonAarch0
+      - PerformanceComparisonAarch1
+      - PerformanceComparisonAarch2
+      - PerformanceComparisonAarch3
+      - UnitTestsAsan
+      - UnitTestsTsan
+      - UnitTestsMsan
+      - UnitTestsUBsan
+      - UnitTestsReleaseClang
+      - SplitBuildSmokeTest
+      - CompatibilityCheck
+      - IntegrationTestsFlakyCheck
+      - Jepsen
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Finish label
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 finish_check.py

From ae0fe1ac85426e95befbd4fcc050eafdedf90706 Mon Sep 17 00:00:00 2001
From: Antonio Andelic
Date: Fri, 1 Jul 2022 09:15:25 +0000
Subject: [PATCH 21/74] Add setting for read mode

---
 src/Coordination/CoordinationSettings.h |   5 +-
 src/Coordination/KeeperDispatcher.cpp   | 113 ++++++++++--------
 src/Coordination/KeeperServer.cpp       |  13 ++
 .../resources/keeper_config.xml         |   1 +
 4 files changed, 78 insertions(+), 54 deletions(-)

diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h
index c1e41f46670..81edd86f671 100644
--- a/src/Coordination/CoordinationSettings.h
+++ b/src/Coordination/CoordinationSettings.h
@@ -26,7 +26,7 @@ struct Settings;
     M(Milliseconds, heart_beat_interval_ms, 500, "Heartbeat interval between quorum nodes", 0) \
     M(Milliseconds, election_timeout_lower_bound_ms, 1000, "Lower bound of election timer (avoid too often leader elections)", 0) \
     M(Milliseconds, election_timeout_upper_bound_ms, 2000, "Upper bound of election timer (avoid too often leader elections)", 0) \
-    M(Milliseconds, leadership_expiry, 10000, "How often will leader node check if it still has majority. Set it lower or equal to election_timeout_lower_bound_ms to have linearizable reads.", 0) \
+    M(Milliseconds, leadership_expiry, 0, "How often will leader node check if it still has majority. Set it lower or equal to election_timeout_lower_bound_ms to have linearizable reads.", 0) \
     M(UInt64, reserved_log_items, 100000, "How many log items to store (don't remove during compaction)", 0) \
     M(UInt64, snapshot_distance, 100000, "How many log items we have to collect to write new snapshot", 0) \
     M(Bool, auto_forwarding, true, "Allow to forward write requests from followers to leader", 0) \
@@ -42,7 +42,8 @@ struct Settings;
     M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \
     M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \
     M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \
-    M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0)
+    M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0) \
+    M(String, read_mode, "nonlinear", "How should reads be processed", 0)
 
 DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS)
 
diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp
index 65f71d22a95..a31eb9195a2 100644
--- a/src/Coordination/KeeperDispatcher.cpp
+++ b/src/Coordination/KeeperDispatcher.cpp
@@ -40,75 +40,84 @@ void KeeperDispatcher::requestThread()
     const auto needs_quorum = [](const auto & coordination_settings, const auto & request)
     {
-        return coordination_settings->quorum_reads || !request.request->isReadRequest();
+        return coordination_settings->quorum_reads || coordination_settings->read_mode.toString() == "quorum" || !request.request->isReadRequest();
     };
 
-    KeeperStorage::RequestsForSessions write_requests;
+    KeeperStorage::RequestsForSessions quorum_requests;
     KeeperStorage::RequestsForSessions read_requests;
 
     auto process_read_requests = [&, this](const auto & coordination_settings) mutable
     {
+        if (coordination_settings->read_mode.toString() == "fastlinear")
+        {
+            server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & result, nuraft::ptr<std::exception> & exception) mutable
+            {
-            if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT)
-            {
-                addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT);
-                return;
-            }
-            else if (result.get_result_code() != nuraft::cmd_result_code::OK)
-            {
-                addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-                return;
-            }
-            else if (exception)
-            {
-                LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what());
-                addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-                return;
-            }
+                if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT)
+                {
+                    addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT);
+                    return;
+                }
+                else if (result.get_result_code() != nuraft::cmd_result_code::OK)
+                {
+                    addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
+                    return;
+                }
+                else if (exception)
+                {
+                    LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what());
+                    addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
+                    return;
+                }
 
-            auto & leader_info_ctx = result.get();
+                auto & leader_info_ctx = result.get();
 
-            if (!leader_info_ctx)
-            {
-                addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
-                return;
-            }
+                if (!leader_info_ctx)
+                {
+                    addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS);
+                    return;
+                }
 
-            KeeperServer::NodeInfo leader_info;
-            leader_info.term = leader_info_ctx->get_ulong();
-            leader_info.last_committed_index = leader_info_ctx->get_ulong();
+                KeeperServer::NodeInfo leader_info;
+                leader_info.term = leader_info_ctx->get_ulong();
+                leader_info.last_committed_index = leader_info_ctx->get_ulong();
+                std::lock_guard lock(leader_waiter_mutex);
+                auto node_info = server->getNodeInfo();
 
-            std::lock_guard lock(leader_waiter_mutex);
-            auto node_info = server->getNodeInfo();
-
-            if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
-            {
-                auto & leader_waiter = leader_waiters[leader_info];
-                leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end());
-                LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index);
-            }
-            else if (!read_requests_queue.push(std::move(requests_for_sessions)))
+                if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index)
+                {
+                    auto & leader_waiter = leader_waiters[leader_info];
+                    leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end());
+                    LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index);
+                }
+                else if (!read_requests_queue.push(std::move(requests_for_sessions)))
+                    throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
+            });
+        }
+        else
+        {
+            assert(coordination_settings->read_mode.toString() == "nonlinear");
+            if (!read_requests_queue.push(std::move(read_requests)))
                 throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue");
-        });
+        }
+
+        read_requests.clear();
     };
 
-    auto process_write_requests = [&, this]() mutable
+    auto process_quorum_requests = [&, this]() mutable
     {
         /// Forcefully process all previous pending requests
         if (prev_result)
             forceWaitAndProcessResult(prev_result);
 
-        prev_result = server->putRequestBatch(write_requests);
+        prev_result = server->putRequestBatch(quorum_requests);
         prev_result->when_ready([&, requests_for_sessions =
std::move(write_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + prev_result = server->putRequestBatch(quorum_requests); + prev_result->when_ready([&, requests_for_sessions = std::move(quorum_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable { if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); else if (result.get_result_code() != nuraft::cmd_result_code::OK) addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); }); - write_requests.clear(); + quorum_requests.clear(); }; while (!shutdown_called) @@ -128,7 +137,7 @@ void KeeperDispatcher::requestThread() break; if (needs_quorum(coordination_settings, request)) - write_requests.emplace_back(request); + quorum_requests.emplace_back(request); else read_requests.emplace_back(request); @@ -137,15 +146,15 @@ void KeeperDispatcher::requestThread() /// Sometimes NuRaft set errorcode without setting result, so we check both here. while (true) { - if (write_requests.size() > max_batch_size) + if (quorum_requests.size() > max_batch_size) { - process_write_requests(); + process_quorum_requests(); break; } if (read_requests.size() > max_batch_size) { - process_read_requests(); + process_read_requests(coordination_settings); break; } @@ -153,14 +162,14 @@ void KeeperDispatcher::requestThread() if (requests_queue->tryPop(request, 1)) { if (needs_quorum(coordination_settings, request)) - write_requests.emplace_back(request); + quorum_requests.emplace_back(request); else read_requests.emplace_back(request); } - else + else { if (!read_requests.empty()) - process_read_requests(); + process_read_requests(coordination_settings); if (!prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) break; @@ -173,8 +182,8 @@ void KeeperDispatcher::requestThread() if (shutdown_called) break; - if (!write_requests.empty()) - process_write_requests(); + if (!quorum_requests.empty()) + process_quorum_requests(); } } diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index dfea73de67b..3facf9df9e5 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -253,7 +253,20 @@ void KeeperServer::launchRaftServer(bool enable_ipv6) coordination_settings->election_timeout_lower_bound_ms.totalMilliseconds(), "election_timeout_lower_bound_ms", log); params.election_timeout_upper_bound_ = getValueOrMaxInt32AndLogWarning( coordination_settings->election_timeout_upper_bound_ms.totalMilliseconds(), "election_timeout_upper_bound_ms", log); + params.leadership_expiry_ = getValueOrMaxInt32AndLogWarning(coordination_settings->leadership_expiry.totalMilliseconds(), "leadership_expiry", log); + + if (coordination_settings->read_mode.toString() == "fastlinear") + { + if (params.leadership_expiry_ == 0) + params.leadership_expiry_ = params.election_timeout_lower_bound_; + else if (params.leadership_expiry_ > params.election_timeout_lower_bound_) + { + LOG_WARNING(log, "To use fast linearizable reads, leadership_expiry should be set to a value thath is less or equal to the election_timeout_upper_bound_ms. 
" + "Based on current settings, there are no guarantees for linearizability of reads."); + } + } + params.reserved_log_items_ = getValueOrMaxInt32AndLogWarning(coordination_settings->reserved_log_items, "reserved_log_items", log); params.snapshot_distance_ = getValueOrMaxInt32AndLogWarning(coordination_settings->snapshot_distance, "snapshot_distance", log); diff --git a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml index db857693a7b..86a3da08bf9 100644 --- a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml +++ b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml @@ -23,6 +23,7 @@ 2000 4000 0 + fastlinear {snapshot_distance} {stale_log_gap} {reserved_log_items} From 5a97c0fc8bad77547f43429fc08bd9c1d9d660aa Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 1 Jul 2022 13:19:28 +0000 Subject: [PATCH 22/74] Finalize requests in separate thread --- src/Coordination/KeeperDispatcher.cpp | 45 ++++++++++++++++++++++----- src/Coordination/KeeperDispatcher.h | 4 +++ 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index a31eb9195a2..aebd58cc83f 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -25,6 +26,7 @@ namespace ErrorCodes KeeperDispatcher::KeeperDispatcher() : responses_queue(std::numeric_limits::max()) , read_requests_queue(std::numeric_limits::max()) + , finalize_requests_queue(std::numeric_limits::max()) , configuration_and_settings(std::make_shared()) , log(&Poco::Logger::get("KeeperDispatcher")) { @@ -37,6 +39,7 @@ void KeeperDispatcher::requestThread() /// Result of requests batch from previous iteration RaftAppendResult prev_result = nullptr; + const auto previous_quorum_done = [&] { return !prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK; }; const auto needs_quorum = [](const auto & coordination_settings, const auto & request) { @@ -132,7 +135,6 @@ void KeeperDispatcher::requestThread() { if (requests_queue->tryPop(request, max_wait)) { - if (shutdown_called) break; @@ -147,15 +149,14 @@ void KeeperDispatcher::requestThread() while (true) { if (quorum_requests.size() > max_batch_size) - { - process_quorum_requests(); break; - } if (read_requests.size() > max_batch_size) { process_read_requests(coordination_settings); - break; + + if (previous_quorum_done()) + break; } /// Trying to get batch requests as fast as possible @@ -171,7 +172,7 @@ void KeeperDispatcher::requestThread() if (!read_requests.empty()) process_read_requests(coordination_settings); - if (!prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK) + if (previous_quorum_done()) break; } @@ -265,6 +266,30 @@ void KeeperDispatcher::readRequestThread() addErrorResponses({request_info}, Coordination::Error::ZCONNECTIONLOSS); } + if (!finalize_requests_queue.push(std::move(requests))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } + } +} + +void KeeperDispatcher::finalizeRequestsThread() +{ + setThreadName("KeeperFinalT"); + while (!shutdown_called) + { + KeeperStorage::RequestsForSessions requests; + if (!finalize_requests_queue.pop(requests)) + break; + + if (shutdown_called) + break; + + try + { finalizeRequests(requests); } catch (...) @@ -379,6 +404,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf responses_thread = ThreadFromGlobalPool([this] { responseThread(); }); snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); }); read_request_thread = ThreadFromGlobalPool([this] { readRequestThread(); }); + finalize_requests_thread= ThreadFromGlobalPool([this] { finalizeRequestsThread(); }); server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { onRequestCommit(request_for_session, log_term, log_idx); }); @@ -448,6 +474,10 @@ void KeeperDispatcher::shutdown() if (read_request_thread.joinable()) read_request_thread.join(); + finalize_requests_queue.finish(); + if (finalize_requests_thread.joinable()) + finalize_requests_thread.join(); + update_configuration_queue.finish(); if (update_configuration_thread.joinable()) update_configuration_thread.join(); @@ -748,7 +778,8 @@ void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { - finalizeRequests({request_for_session}); + if (!finalize_requests_queue.push({request_for_session})) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); KeeperStorage::RequestsForSessions requests; { diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 50b842d73b4..b2c88d7ef30 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -36,6 +36,7 @@ private: ResponsesQueue responses_queue; SnapshotsQueue snapshots_queue{1}; ConcurrentBoundedQueue read_requests_queue; + ConcurrentBoundedQueue finalize_requests_queue; /// More than 1k updates is definitely misconfiguration. UpdateConfigurationQueue update_configuration_queue{1000}; @@ -66,6 +67,7 @@ private: /// Apply or wait for configuration changes ThreadFromGlobalPool update_configuration_thread; ThreadFromGlobalPool read_request_thread; + ThreadFromGlobalPool finalize_requests_thread; /// RAFT wrapper. std::unique_ptr server; @@ -107,6 +109,8 @@ private: void readRequestThread(); + void finalizeRequestsThread(); + void setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response); /// Add error responses for requests to responses queue. 
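[Editor's aside between patches 22 and 23] Patch 22 above moves request finalization off the commit path: committing only enqueues work, and a dedicated "KeeperFinalT" thread drains the queue. A minimal, self-contained C++ sketch of that worker-queue shape, with invented names standing in for ConcurrentBoundedQueue and finalizeRequestsThread (this is an illustration of the pattern, not Keeper's actual implementation):

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Simplified stand-in for ConcurrentBoundedQueue: push fails after finish(),
// pop blocks until an item arrives, or until the queue is finished and drained.
template <typename T>
class SimpleQueue
{
public:
    bool push(T item)
    {
        std::lock_guard lock(mutex);
        if (finished)
            return false;
        items.push_back(std::move(item));
        cv.notify_one();
        return true;
    }

    bool pop(T & item)
    {
        std::unique_lock lock(mutex);
        cv.wait(lock, [this] { return finished || !items.empty(); });
        if (items.empty())
            return false; // finished and fully drained
        item = std::move(items.front());
        items.pop_front();
        return true;
    }

    void finish()
    {
        std::lock_guard lock(mutex);
        finished = true;
        cv.notify_all();
    }

private:
    std::deque<T> items;
    std::mutex mutex;
    std::condition_variable cv;
    bool finished = false;
};

int main()
{
    SimpleQueue<std::function<void()>> finalize_queue;

    // Mirrors finalizeRequestsThread(): keep draining until finish() is called.
    std::thread finalizer([&]
    {
        std::function<void()> task;
        while (finalize_queue.pop(task))
            task(); // finalizeRequests(...) would run here
    });

    finalize_queue.push([] { /* finalize one committed batch */ });
    finalize_queue.finish(); // shutdown path, like finalize_requests_queue.finish()
    finalizer.join();
}

The property worth noting is the shutdown handshake: finish() wakes the worker, which drains any remaining items and exits, matching the finalize_requests_queue.finish() plus join() sequence the patch adds to KeeperDispatcher::shutdown().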
From 97e32e6ad9ce874576845802d9de58cb4d9db5b5 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 1 Jul 2022 14:55:26 +0000 Subject: [PATCH 23/74] Use commit_ext in unit tests --- src/Coordination/tests/gtest_coordination.cpp | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index ee75f2a0860..bf054ac5de3 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -1312,8 +1312,9 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint changelog.append(entry); changelog.end_of_append_batch(0, 0); - state_machine->pre_commit(i, changelog.entry_at(i)->get_buf()); - state_machine->commit(i, changelog.entry_at(i)->get_buf()); + auto entry_buf = changelog.entry_at(i)->get_buf_ptr(); + state_machine->pre_commit(i, *entry_buf); + state_machine->commit_ext(nuraft::state_machine::ext_op_params{i, entry_buf}); bool snapshot_created = false; if (i % settings->snapshot_distance == 0) { @@ -1357,8 +1358,9 @@ void testLogAndStateMachine(Coordination::CoordinationSettingsPtr settings, uint for (size_t i = restore_machine->last_commit_index() + 1; i < restore_changelog.next_slot(); ++i) { - restore_machine->pre_commit(i, changelog.entry_at(i)->get_buf()); - restore_machine->commit(i, changelog.entry_at(i)->get_buf()); + auto entry = changelog.entry_at(i)->get_buf_ptr(); + restore_machine->pre_commit(i, *entry); + restore_machine->commit_ext(nuraft::state_machine::ext_op_params{i, entry}); } auto & source_storage = state_machine->getStorage(); @@ -1459,18 +1461,18 @@ TEST_P(CoordinationTest, TestEphemeralNodeRemove) std::shared_ptr request_c = std::make_shared(); request_c->path = "/hello"; request_c->is_ephemeral = true; - auto entry_c = getLogEntryFromZKRequest(0, 1, state_machine->getNextZxid(), request_c); - state_machine->pre_commit(1, entry_c->get_buf()); - state_machine->commit(1, entry_c->get_buf()); + auto entry_c = getLogEntryFromZKRequest(0, 1, state_machine->getNextZxid(), request_c)->get_buf_ptr(); + state_machine->pre_commit(1, *entry_c); + state_machine->commit_ext(nuraft::state_machine::ext_op_params{1, entry_c}); const auto & storage = state_machine->getStorage(); EXPECT_EQ(storage.ephemerals.size(), 1); std::shared_ptr request_d = std::make_shared(); request_d->path = "/hello"; /// Delete from other session - auto entry_d = getLogEntryFromZKRequest(0, 2, state_machine->getNextZxid(), request_d); - state_machine->pre_commit(2, entry_d->get_buf()); - state_machine->commit(2, entry_d->get_buf()); + auto entry_d = getLogEntryFromZKRequest(0, 2, state_machine->getNextZxid(), request_d)->get_buf_ptr(); + state_machine->pre_commit(2, *entry_d); + state_machine->commit_ext(nuraft::state_machine::ext_op_params{2, entry_d}); EXPECT_EQ(storage.ephemerals.size(), 0); } From 8c776328812cafe32b61caafd0ad66929024e5ba Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Fri, 1 Jul 2022 14:58:19 +0000 Subject: [PATCH 24/74] Fix style --- src/Coordination/KeeperServer.cpp | 5 ++--- src/Coordination/KeeperServer.h | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 3facf9df9e5..d20b8c78f5e 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -165,9 +165,8 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server RaftAppendResult getLeaderInfo() { - 
nuraft::ptr req = nuraft::cs_new - ( 0ull, nuraft::msg_type::leader_status_request, 0, 0, - 0ull, 0ull, 0ull ) ; + nuraft::ptr req + = nuraft::cs_new(0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull); return send_msg_to_leader(req); } diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 472cb67361c..fa219f73aeb 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -136,8 +136,8 @@ public: }; } -namespace std { - +namespace std +{ template <> struct hash { @@ -149,5 +149,4 @@ namespace std { return hash_state.get64(); } }; - } From fd1e5d43a036f6e965635c80acb6002ca9ab7ead Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 12 Jul 2022 07:52:50 +0000 Subject: [PATCH 25/74] fix unit tests in debug mode --- src/Coordination/tests/gtest_coordination.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 66e0dfe704b..1674194c13c 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -1064,6 +1064,13 @@ void addNode(DB::KeeperStorage & storage, const std::string & path, const std::s node.setData(data); node.stat.ephemeralOwner = ephemeral_owner; storage.container.insertOrReplace(path, node); + auto child_it = storage.container.find(path); + auto child_path = DB::getBaseName(child_it->key); + storage.container.updateValue(DB::parentPath(StringRef{path}), [&](auto & parent) + { + parent.addChild(child_path); + parent.stat.numChildren++; + }); } TEST_P(CoordinationTest, TestStorageSnapshotSimple) @@ -1221,7 +1228,7 @@ TEST_P(CoordinationTest, TestStorageSnapshotMode) storage.container.erase("/hello_" + std::to_string(i)); } EXPECT_EQ(storage.container.size(), 26); - EXPECT_EQ(storage.container.snapshotSizeWithVersion().first, 101); + EXPECT_EQ(storage.container.snapshotSizeWithVersion().first, 102); EXPECT_EQ(storage.container.snapshotSizeWithVersion().second, 1); auto buf = manager.serializeSnapshotToBuffer(snapshot); manager.serializeSnapshotBufferToDisk(*buf, 50); @@ -1778,6 +1785,7 @@ TEST_P(CoordinationTest, TestStorageSnapshotEqual) DB::KeeperSnapshotManager manager("./snapshots", 3, params.enable_compression); DB::KeeperStorage storage(500, "", true); + addNode(storage, "/hello", ""); for (size_t j = 0; j < 5000; ++j) { addNode(storage, "/hello_" + std::to_string(j), "world", 1); From 8bd86c39f6fa54ae492848bdacc1e123c3313d96 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 12 Jul 2022 08:00:31 +0000 Subject: [PATCH 26/74] Add valid values for read_mode setting --- src/Coordination/CoordinationSettings.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h index 81edd86f671..5597bb7f82f 100644 --- a/src/Coordination/CoordinationSettings.h +++ b/src/Coordination/CoordinationSettings.h @@ -43,7 +43,7 @@ struct Settings; M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \ M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \ M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0) \ - M(String, read_mode, "nonlinear", "How should reads be processed", 0) + M(String, read_mode, "nonlinear", "How should reads be processed. 
Valid values: 'nonlinear', 'fastlinear', 'quorum'", 0) DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS) From 2634f80956bcea78d1577a65ae938097cd47e06b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 13 Jul 2022 11:06:58 +0000 Subject: [PATCH 27/74] Remove trailing whitespaces --- src/Coordination/KeeperServer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 40620832b5a..870d56fff39 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -139,7 +139,7 @@ public: }; } -namespace std +namespace std { template <> struct hash From 838a11a85a9121c5ae9f96b497de51441b485432 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 13 Jul 2022 11:35:06 +0000 Subject: [PATCH 28/74] Rename RaftAppendResult --- src/Coordination/KeeperDispatcher.cpp | 97 +++++++++++++++------------ src/Coordination/KeeperDispatcher.h | 2 +- src/Coordination/KeeperServer.cpp | 10 ++- src/Coordination/KeeperServer.h | 26 +++---- 4 files changed, 74 insertions(+), 61 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 8162fe277b2..3a744b41a05 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -45,7 +45,7 @@ void KeeperDispatcher::requestThread() setThreadName("KeeperReqT"); /// Result of requests batch from previous iteration - RaftAppendResult prev_result = nullptr; + RaftResult prev_result = nullptr; const auto previous_quorum_done = [&] { return !prev_result || prev_result->has_result() || prev_result->get_result_code() != nuraft::cmd_result_code::OK; }; const auto needs_quorum = [](const auto & coordination_settings, const auto & request) @@ -60,48 +60,52 @@ void KeeperDispatcher::requestThread() { if (coordination_settings->read_mode.toString() == "fastlinear") { - server->getLeaderInfo()->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr & exception) mutable + auto leader_info_result = server->getLeaderInfo(); + if (leader_info_result) { - if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + leader_info_result->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr & exception) mutable { - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - return; - } - else if (result.get_result_code() != nuraft::cmd_result_code::OK) - { - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } - else if (exception) - { - LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what()); - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + return; + } + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } + else if (exception) + { + LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what()); + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } - auto & 
leader_info_ctx = result.get(); + auto & leader_info_ctx = result.get(); - if (!leader_info_ctx) - { - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } + if (!leader_info_ctx) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } - KeeperServer::NodeInfo leader_info; - leader_info.term = leader_info_ctx->get_ulong(); - leader_info.last_committed_index = leader_info_ctx->get_ulong(); - std::lock_guard lock(leader_waiter_mutex); - auto node_info = server->getNodeInfo(); + KeeperServer::NodeInfo leader_info; + leader_info.term = leader_info_ctx->get_ulong(); + leader_info.last_committed_index = leader_info_ctx->get_ulong(); + std::lock_guard lock(leader_waiter_mutex); + auto node_info = server->getNodeInfo(); - if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) - { - auto & leader_waiter = leader_waiters[leader_info]; - leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); - LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); - } - else if (!read_requests_queue.push(std::move(requests_for_sessions))) - throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); - }); + if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) + { + auto & leader_waiter = leader_waiters[leader_info]; + leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); + LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); + } + else if (!read_requests_queue.push(std::move(requests_for_sessions))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); + }); + } } else { @@ -120,13 +124,18 @@ void KeeperDispatcher::requestThread() forceWaitAndProcessResult(prev_result); prev_result = server->putRequestBatch(quorum_requests); - prev_result->when_ready([&, requests_for_sessions = std::move(quorum_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + + if (prev_result) { - if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - else if (result.get_result_code() != nuraft::cmd_result_code::OK) - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - }); + prev_result->when_ready([&, requests_for_sessions = std::move(quorum_requests)](nuraft::cmd_result> & result, nuraft::ptr &) mutable + { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + else if (result.get_result_code() != nuraft::cmd_result_code::OK) + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + }); + } + quorum_requests.clear(); }; @@ -618,7 +627,7 @@ void KeeperDispatcher::addErrorResponses(const KeeperStorage::RequestsForSession } } -void KeeperDispatcher::forceWaitAndProcessResult(RaftAppendResult & result) +void KeeperDispatcher::forceWaitAndProcessResult(RaftResult & result) { if (!result->has_result()) result->get(); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 11cfd5afd08..6148b57b1f4 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ 
-119,7 +119,7 @@ private: /// Forcefully wait for result and sets errors if something when wrong. /// Clears both arguments - static void forceWaitAndProcessResult(RaftAppendResult & result); + static void forceWaitAndProcessResult(RaftResult & result); public: /// Just allocate some objects, real initialization is done by `intialize method` diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 60e8e3b107a..bfbea82ed0c 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -163,7 +163,7 @@ struct KeeperServer::KeeperRaftServer : public nuraft::raft_server reconfigure(new_config); } - RaftAppendResult getLeaderInfo() + RaftResult getLeaderInfo() { nuraft::ptr req = nuraft::cs_new(0ull, nuraft::msg_type::leader_status_request, 0, 0, 0ull, 0ull, 0ull); @@ -447,7 +447,7 @@ void KeeperServer::putLocalReadRequest(const KeeperStorage::RequestForSession & state_machine->processReadRequest(request_for_session); } -RaftAppendResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions) +RaftResult KeeperServer::putRequestBatch(const KeeperStorage::RequestsForSessions & requests_for_sessions) { std::vector> entries; for (const auto & request_for_session : requests_for_sessions) @@ -658,8 +658,12 @@ std::vector KeeperServer::getDeadSessions() return state_machine->getDeadSessions(); } -RaftAppendResult KeeperServer::getLeaderInfo() +RaftResult KeeperServer::getLeaderInfo() { + std::lock_guard lock{server_write_mutex}; + if (is_recovering) + return nullptr; + return raft_instance->getLeaderInfo(); } diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 870d56fff39..813ddd00b9d 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -13,7 +13,7 @@ namespace DB { -using RaftAppendResult = nuraft::ptr>>; +using RaftResult = nuraft::ptr>>; class KeeperServer { @@ -80,7 +80,7 @@ public: /// Put batch of requests into Raft and get result of put. Responses will be set separately into /// responses_queue. 
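[Editor's aside inside this header diff] Both putRequestBatch() and getLeaderInfo() hand back a RaftResult, an asynchronous NuRaft command handle. A hedged sketch of how such a handle is consumed, using only the member calls that actually appear in this patch series (when_ready, get_accepted, get_result_code, get); the error mapping in the comments is illustrative, not the dispatcher's exact policy, and the sketch assumes the RaftResult alias from this header plus the NuRaft headers:

// Consume an async RaftResult the way the dispatcher does in this series.
void consumeLeaderInfo(RaftResult result)
{
    if (!result)
        return; // e.g. getLeaderInfo() returns nullptr while the server is recovering

    result->when_ready(
        [](nuraft::cmd_result<nuraft::ptr<nuraft::buffer>> & res,
           nuraft::ptr<std::exception> & exception)
        {
            if (!res.get_accepted() || res.get_result_code() == nuraft::cmd_result_code::TIMEOUT)
                return; // would map to Coordination::Error::ZOPERATIONTIMEOUT
            if (res.get_result_code() != nuraft::cmd_result_code::OK || exception)
                return; // would map to Coordination::Error::ZCONNECTIONLOSS

            auto & buf = res.get();
            if (!buf)
                return; // no payload, also treated as connection loss

            // For getLeaderInfo() the payload is two ulongs:
            // the leader's term and its last committed log index.
            uint64_t term = buf->get_ulong();
            uint64_t last_committed_index = buf->get_ulong();
            (void)term;
            (void)last_committed_index;
        });
}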
- RaftAppendResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests); + RaftResult putRequestBatch(const KeeperStorage::RequestsForSessions & requests); /// Return set of the non-active sessions std::vector getDeadSessions(); @@ -123,7 +123,7 @@ public: bool operator==(const NodeInfo &) const = default; }; - RaftAppendResult getLeaderInfo(); + RaftResult getLeaderInfo(); NodeInfo getNodeInfo(); /// Get configuration diff between current configuration in RAFT and in XML file @@ -141,15 +141,15 @@ public: } namespace std { - template <> - struct hash - { - size_t operator()(const DB::KeeperServer::NodeInfo & info) const + template <> + struct hash { - SipHash hash_state; - hash_state.update(info.term); - hash_state.update(info.last_committed_index); - return hash_state.get64(); - } - }; + size_t operator()(const DB::KeeperServer::NodeInfo & info) const + { + SipHash hash_state; + hash_state.update(info.term); + hash_state.update(info.last_committed_index); + return hash_state.get64(); + } + }; } From 46882791cdabbc1a2540675146ef1fea9cd30856 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 23 Aug 2022 07:29:47 +0000 Subject: [PATCH 29/74] Fix build --- src/Coordination/KeeperServer.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 6a6b9e14233..c9f9d6ab31d 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -127,7 +127,6 @@ KeeperServer::KeeperServer( coordination_settings, keeper_context, checkAndGetSuperdigest(configuration_and_settings_->super_digest), - config.getBool("keeper_server.digest_enabled", true), std::move(commit_callback)); state_manager = nuraft::cs_new( From 5d45f2faa9789af41c319edc130cb443c4cb8d1b Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 23 Aug 2022 08:22:03 +0000 Subject: [PATCH 30/74] Add comments --- src/Coordination/KeeperDispatcher.cpp | 19 ++++++++++++++++++- src/Coordination/KeeperDispatcher.h | 6 ++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 3a744b41a05..2bf6100d33e 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -60,6 +60,7 @@ void KeeperDispatcher::requestThread() { if (coordination_settings->read_mode.toString() == "fastlinear") { + // we just want to know what's the current latest committed log on Leader node auto leader_info_result = server->getLeaderInfo(); if (leader_info_result) { @@ -96,12 +97,14 @@ void KeeperDispatcher::requestThread() std::lock_guard lock(leader_waiter_mutex); auto node_info = server->getNodeInfo(); + // we're behind, we need to wait if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) { auto & leader_waiter = leader_waiters[leader_info]; leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); - LOG_INFO(log, "waiting for {}, idx {}", leader_info.term, leader_info.last_committed_index); + LOG_TRACE(log, "waiting for term {}, idx {}", leader_info.term, leader_info.last_committed_index); } + // process it in background thread else if (!read_requests_queue.push(std::move(requests_for_sessions))) throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); }); @@ -139,6 +142,10 @@ void KeeperDispatcher::requestThread() quorum_requests.clear(); }; + // ZooKeeper requires that the requests inside a single session 
are processed in a strict order +    // (we cannot process later requests before all the previous ones are processed) +    // By making sure that at this point we can either have just read requests or just write requests +    // from a single session, we can process them independently while (!shutdown_called) { KeeperStorage::RequestForSession request; @@ -184,9 +191,13 @@ } else { +            // batch of read requests can send at most one request +            // so we don't care if the previous batch hasn't received response if (!read_requests.empty()) process_read_requests(coordination_settings); +            // if we still didn't process previous batch we can +            // increase our current batch even more if (previous_quorum_done()) break; } @@ -262,6 +273,7 @@ void KeeperDispatcher::snapshotThread() } } +// Background thread for processing read requests void KeeperDispatcher::readRequestThread() { setThreadName("KeeperReadT"); @@ -294,6 +306,9 @@ } } +// We finalize requests every time we commit a single log with request +// or process a batch of read requests. +// Because it can get heavy, we do it in background thread. void KeeperDispatcher::finalizeRequestsThread() { setThreadName("KeeperFinalT"); @@ -385,6 +400,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ { auto & unprocessed_requests = unprocessed_requests_it->second; +        // queue is not empty, or the request types don't match, put it in the waiting queue if (!unprocessed_requests.request_queue.empty() || unprocessed_requests.is_read != request->isReadRequest()) { unprocessed_requests.request_queue.push_back(std::move(request_info)); @@ -778,6 +794,7 @@ void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions { auto & request_queue = unprocessed_requests.request_queue; unprocessed_requests.is_read = !unprocessed_requests.is_read; +            // start adding next type of requests while (!request_queue.empty() && request_queue.front().request->isReadRequest() == unprocessed_requests.is_read) { auto & front_request = request_queue.front(); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 6148b57b1f4..6531031f458 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -84,6 +84,12 @@ private: std::unordered_map> leader_waiters; std::mutex leader_waiter_mutex; +    // We can be actively processing one type of requests (either read or write) from a single session. +    // If we receive a request of a type that is not currently being processed, we put it in the waiting queue. +    // Also, we want to process them in arriving order, so if we have a different type in the queue, we cannot process that request +    // but wait for all the previous requests to finish. +    // E.g. READ -> WRITE -> READ, the last READ will go to the waiting queue even though we are currently processing the first READ +    // because we have WRITE request before it that needs to be processed. struct UnprocessedRequests { size_t unprocessed_num{0}; From 6c180d0c44bc134f3b57d126ff079b180b489bce Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Tue, 23 Aug 2022 13:18:17 +0000 Subject: [PATCH 31/74] Use default expiry for jepsen --- tests/jepsen.clickhouse-keeper/resources/keeper_config.xml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml index 86a3da08bf9..677de5f6769 100644 --- a/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml +++ b/tests/jepsen.clickhouse-keeper/resources/keeper_config.xml @@ -1,5 +1,5 @@ - 0.0.0.0 + :: trace @@ -19,7 +19,6 @@ 120000 trace 1000 - 1900 2000 4000 0 From 073b168b31a9842ffa498295b90abba70e55cc1e Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Aug 2022 07:50:08 +0000 Subject: [PATCH 32/74] add msg type string for leader status --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index 29e7ffa4275..dc63ab3cdc1 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit 29e7ffa42756d1fbb6530b5e723ddc7b42bbeee3 +Subproject commit dc63ab3cdc1233ae6d6f343c42e52a9fc50b9e39 From c20b9e554b006aa5c256fd7522bb08f080f078cc Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 24 Aug 2022 09:12:52 +0000 Subject: [PATCH 33/74] Fix counter test --- .../src/jepsen/clickhouse_keeper/counter.clj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj index 8501d521a11..e6e94371501 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/counter.clj @@ -30,7 +30,7 @@ :read (try (assoc op :type :ok - :value (count (zk-list conn "/"))) + :value (count (zk-list conn root-path))) (catch Exception _ (assoc op :type :info, :error :connect-error))) :final-read (exec-with-retries 30 (fn [] (assoc op From 50af0b193884609019c4385b876a63d2e2f799f8 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 29 Aug 2022 10:50:18 +0000 Subject: [PATCH 34/74] Add bunch of comments for request processing --- src/Coordination/CoordinationSettings.cpp | 3 + src/Coordination/CoordinationSettings.h | 4 +- src/Coordination/KeeperDispatcher.cpp | 105 ++++++++++++------ src/Coordination/KeeperDispatcher.h | 23 ++-- src/Coordination/KeeperServer.cpp | 4 +- src/Coordination/KeeperStateMachine.cpp | 2 +- src/Coordination/KeeperStateMachine.h | 3 +- .../src/jepsen/clickhouse_keeper/db.clj | 1 - .../src/jepsen/clickhouse_keeper/main.clj | 2 +- 9 files changed, 98 insertions(+), 49 deletions(-) diff --git a/src/Coordination/CoordinationSettings.cpp b/src/Coordination/CoordinationSettings.cpp index 3e03ee0d6f4..3bc611809f4 100644 --- a/src/Coordination/CoordinationSettings.cpp +++ b/src/Coordination/CoordinationSettings.cpp @@ -189,6 +189,9 @@ KeeperConfigurationAndSettings::loadFromConfig(const Poco::Util::AbstractConfigu ret->coordination_settings->loadFromConfig("keeper_server.coordination_settings", config); + if (ret->coordination_settings->quorum_reads) + LOG_WARNING(&Poco::Logger::get("KeeperConfigurationAndSettings"), "Setting 'quorum_reads' is deprecated. 
Please use 'read_mode'"); + return ret; } diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h index 9636c0ffd32..1a7cb7b11fb 100644 --- a/src/Coordination/CoordinationSettings.h +++ b/src/Coordination/CoordinationSettings.h @@ -38,12 +38,12 @@ struct Settings; M(UInt64, stale_log_gap, 10000, "When node became stale and should receive snapshots from leader", 0) \ M(UInt64, fresh_log_gap, 200, "When node became fresh", 0) \ M(UInt64, max_requests_batch_size, 100, "Max size of batch in requests count before it will be sent to RAFT", 0) \ -    M(Bool, quorum_reads, false, "Execute read requests as writes through whole RAFT consensus with similar speed", 0) \ +    M(Bool, quorum_reads, false, "Deprecated - use read_mode. Execute read requests as writes through whole RAFT consensus with similar speed", 0) \ M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \ M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \ M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \ M(UInt64, configuration_change_tries_count, 20, "How many times we will try to apply configuration change (add/remove server) to the cluster", 0) \ -    M(String, read_mode, "nonlinear", "How should reads be processed. Valid values: 'nonlinear', 'fastlinear', 'quorum'", 0) +    M(String, read_mode, "nonlinear", "How should reads be processed. Valid values: 'nonlinear', 'fastlinear', 'quorum'. 'nonlinear' is the fastest option because there are no consistency requirements", 0) DECLARE_SETTINGS_TRAITS(CoordinationSettingsTraits, LIST_OF_COORDINATION_SETTINGS) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 2bf6100d33e..987fcb274ce 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -40,6 +40,33 @@ KeeperDispatcher::KeeperDispatcher() } +/// ZooKeeper has 2 requirements: +/// - writes need to be linearizable +/// - all requests from a single session need to be processed in the order of their arrival +/// +/// Because of that, we cannot process read and write requests from the SAME session at the same time. +/// To be able to process read and write requests in parallel we need to make sure that only 1 type +/// of request is being processed from a single session. +/// Multiple types from different sessions can be processed at the same time. +/// +/// We do some in-session housekeeping to make sure that the multithreaded request processing is correct. +/// When a request is received from a client, we check if there are requests being processed from that same +/// session, and if yes, of what type. If the types are the same, and there are no requests of different +/// type in between, we can instantly add it to the active request queue. Otherwise, we need to wait until +/// all requests of the other type are processed. +/// +/// There are multiple threads used for processing the request, each of them communicating with a queue. +/// Assumption: only one type of request is being processed from the same session at any point in time (read or write). 
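[Editor's aside] Before the comment block continues with the per-thread breakdown, here is a standalone sketch of the two rules it states. All names here are invented for illustration; only the logic follows the patch (the term/commit-index comparison used by fastlinear reads, and the per-session request-type gate):

#include <cstddef>
#include <cstdint>
#include <deque>

// Rule 1, fastlinear reads: a node may serve a read locally only once it has
// caught up to the leader state observed when the read arrived.
struct NodeInfo
{
    uint64_t term = 0;
    uint64_t last_committed_index = 0;
};

bool canServeReadLocally(const NodeInfo & local, const NodeInfo & leader)
{
    // Behind in term or in committed logs: park the request in a waiters map
    // (like leader_waiters) until the commit path catches up and releases it.
    return local.term >= leader.term
        && local.last_committed_index >= leader.last_committed_index;
}

// Rule 2, per-session ordering: a request joins the active queue only if it
// matches the type currently in flight and nothing of the other type is
// queued ahead of it.
struct SessionState
{
    std::size_t in_flight = 0;     // requests of the current type being processed
    bool processing_reads = false; // type currently in flight
    std::deque<bool> waiting;      // pending request types in arrival order (true = read)
};

bool canEnqueueNow(SessionState & session, bool is_read)
{
    if (session.in_flight == 0 && session.waiting.empty())
    {
        session.processing_reads = is_read; // session idle: switch type freely
        return true;
    }
    // Same type as in flight, and no request of the other type arrived in between.
    return session.waiting.empty() && session.processing_reads == is_read;
}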
+/// +/// requestThread -> requests currently being processed +/// readRequestThread -> thread for processing read requests +/// finalizeRequestThread -> thread for finalizing requests: +/// - in-session housekeeping, add requests to the active request queue if there are any +/// +/// If reads are linearizable without quorum, a request can possibly wait for a certain log to be committed. +/// In that case we add it to the waiting queue for that log. +/// When that log is committed, the committing thread will send that read request to readRequestThread so it can be processed. +/// void KeeperDispatcher::requestThread() { setThreadName("KeeperReqT"); @@ -78,7 +105,7 @@ void KeeperDispatcher::requestThread() } else if (exception) { - LOG_INFO(&Poco::Logger::get("KeeperDispatcher"), "Got exception while waiting for read results {}", exception->what()); + LOG_INFO(log, "Got exception while waiting for read results {}", exception->what()); addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); return; } @@ -97,14 +124,14 @@ void KeeperDispatcher::requestThread() std::lock_guard lock(leader_waiter_mutex); auto node_info = server->getNodeInfo(); - // we're behind, we need to wait + /// we're behind, we need to wait if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) { auto & leader_waiter = leader_waiters[leader_info]; leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); LOG_TRACE(log, "waiting for term {}, idx {}", leader_info.term, leader_info.last_committed_index); } - // process it in background thread + /// process it in background thread else if (!read_requests_queue.push(std::move(requests_for_sessions))) throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); }); @@ -142,10 +169,10 @@ void KeeperDispatcher::requestThread() quorum_requests.clear(); }; - // ZooKeeper requires that the requests inside a single session are processed in a strict order - // (we cannot process later requests before all the previous once are processed) - // By making sure that at this point we can either have just read requests or just write requests - // from a single session, we can process them independently + /// ZooKeeper requires that the requests inside a single session are processed in a strict order + /// (we cannot process later requests before all the previous once are processed) + /// By making sure that at this point we can either have just read requests or just write requests + /// from a single session, we can process them independently while (!shutdown_called) { KeeperStorage::RequestForSession request; @@ -156,7 +183,7 @@ void KeeperDispatcher::requestThread() try { - if (requests_queue->tryPop(request, max_wait)) + if (active_requests_queue->tryPop(request, max_wait)) { CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets); if (shutdown_called) @@ -184,7 +211,7 @@ void KeeperDispatcher::requestThread() } /// Trying to get batch requests as fast as possible - if (requests_queue->tryPop(request, 1)) + if (active_requests_queue->tryPop(request, 1)) { CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets); if (needs_quorum(coordination_settings, request)) @@ -194,13 +221,13 @@ void KeeperDispatcher::requestThread() } else { - // batch of read requests can send at most one request - // so we don't care if the previous batch hasn't received response + /// batch of read requests can send at most one request + /// so we don't care if 
the previous batch hasn't received response if (!read_requests.empty()) process_read_requests(coordination_settings); - // if we still didn't process previous batch we can - // increase are current batch even more + /// if we still didn't process previous batch we can + /// increase are current batch even more if (previous_quorum_done()) break; } @@ -273,7 +300,7 @@ void KeeperDispatcher::snapshotThread() } } -// Background thread for processing read requests +/// Background thread for processing read requests void KeeperDispatcher::readRequestThread() { setThreadName("KeeperReadT"); @@ -306,9 +333,9 @@ void KeeperDispatcher::readRequestThread() } } -// We finalize requests every time we commit a single log with request -// or process a batch of read requests. -// Because it can get heavy, we do it in background thread. +/// We finalize requests every time we commit a single log with request +/// or process a batch of read requests. +/// Because it can get heavy, we do it in background thread. void KeeperDispatcher::finalizeRequestsThread() { setThreadName("KeeperFinalT"); @@ -400,7 +427,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ { auto & unprocessed_requests = unprocessed_requests_it->second; - // queue is not empty, or the request types don't match, put it in the waiting queue + /// queue is not empty, or the request types don't match, put it in the waiting queue if (!unprocessed_requests.request_queue.empty() || unprocessed_requests.is_read != request->isReadRequest()) { unprocessed_requests.request_queue.push_back(std::move(request_info)); @@ -419,10 +446,10 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ /// Put close requests without timeouts if (request->getOpNum() == Coordination::OpNum::Close) { - if (!requests_queue->push(std::move(request_info))) + if (!active_requests_queue->push(std::move(request_info))) throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); } - else if (!requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) + else if (!active_requests_queue->tryPush(std::move(request_info), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) { throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); } @@ -435,13 +462,13 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf LOG_DEBUG(log, "Initializing storage dispatcher"); configuration_and_settings = KeeperConfigurationAndSettings::loadFromConfig(config, standalone_keeper); - requests_queue = std::make_unique(configuration_and_settings->coordination_settings->max_requests_batch_size); + active_requests_queue = std::make_unique(configuration_and_settings->coordination_settings->max_requests_batch_size); request_thread = ThreadFromGlobalPool([this] { requestThread(); }); responses_thread = ThreadFromGlobalPool([this] { responseThread(); }); snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); }); read_request_thread = ThreadFromGlobalPool([this] { readRequestThread(); }); - finalize_requests_thread= ThreadFromGlobalPool([this] { finalizeRequestsThread(); }); + finalize_requests_thread = ThreadFromGlobalPool([this] { finalizeRequestsThread(); }); server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](const KeeperStorage::RequestForSession & request_for_session, 
uint64_t log_term, uint64_t log_idx) { onRequestCommit(request_for_session, log_term, log_idx); }); @@ -491,9 +518,9 @@ void KeeperDispatcher::shutdown() if (session_cleaner_thread.joinable()) session_cleaner_thread.join(); - if (requests_queue) + if (active_requests_queue) { - requests_queue->finish(); + active_requests_queue->finish(); if (request_thread.joinable()) request_thread.join(); @@ -526,7 +553,7 @@ void KeeperDispatcher::shutdown() KeeperStorage::RequestForSession request_for_session; /// Set session expired for all pending requests - while (requests_queue && requests_queue->tryPop(request_for_session)) + while (active_requests_queue && active_requests_queue->tryPop(request_for_session)) { CurrentMetrics::sub(CurrentMetrics::KeeperOutstandingRequets); auto response = request_for_session.request->makeResponse(); @@ -593,7 +620,7 @@ void KeeperDispatcher::sessionCleanerTask() request_info.session_id = dead_session; { std::lock_guard lock(push_request_mutex); - if (!requests_queue->push(std::move(request_info))) + if (!active_requests_queue->push(std::move(request_info))) LOG_INFO(log, "Cannot push close request to queue while cleaning outdated sessions"); CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets); } @@ -696,7 +723,7 @@ int64_t KeeperDispatcher::getSessionID(int64_t session_timeout_ms) /// Push new session request to queue { std::lock_guard lock(push_request_mutex); - if (!requests_queue->tryPush(std::move(request_info), session_timeout_ms)) + if (!active_requests_queue->tryPush(std::move(request_info), session_timeout_ms)) throw Exception("Cannot push session id request to queue within session timeout", ErrorCodes::TIMEOUT_EXCEEDED); CurrentMetrics::add(CurrentMetrics::KeeperOutstandingRequets); } @@ -769,6 +796,10 @@ void KeeperDispatcher::updateConfigurationThread() } } +// Used to update the state for a session based on the requests +// - update the number of current unprocessed requests for the session +// - if the number of unprocessed requests is 0, we can start adding next type of requests +// from unprocessed requests queue to the active queue void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions) { std::unordered_map counts_for_session; @@ -792,26 +823,26 @@ void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions { if (!unprocessed_requests.request_queue.empty()) { - auto & request_queue = unprocessed_requests.request_queue; + auto & unprocessed_requests_queue = unprocessed_requests.request_queue; unprocessed_requests.is_read = !unprocessed_requests.is_read; // start adding next type of requests - while (!request_queue.empty() && request_queue.front().request->isReadRequest() == unprocessed_requests.is_read) + while (!unprocessed_requests_queue.empty() && unprocessed_requests_queue.front().request->isReadRequest() == unprocessed_requests.is_read) { - auto & front_request = request_queue.front(); + auto & front_request = unprocessed_requests_queue.front(); /// Put close requests without timeouts if (front_request.request->getOpNum() == Coordination::OpNum::Close) { - if (!requests_queue->push(std::move(front_request))) + if (!active_requests_queue->push(std::move(front_request))) throw Exception("Cannot push request to queue", ErrorCodes::SYSTEM_ERROR); } - else if (!requests_queue->tryPush(std::move(front_request), configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) + else if (!active_requests_queue->tryPush(std::move(front_request), 
configuration_and_settings->coordination_settings->operation_timeout_ms.totalMilliseconds())) { throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED); } ++unprocessed_requests.unprocessed_num; - request_queue.pop_front(); + unprocessed_requests_queue.pop_front(); } } else @@ -822,6 +853,8 @@ void KeeperDispatcher::finalizeRequests(const KeeperStorage::RequestsForSessions } } +// Finalize request +// Process read requests that were waiting for this commit void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { if (!finalize_requests_queue.push({request_for_session})) @@ -837,6 +870,10 @@ void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & leader_waiters.erase(request_queue_it); } } + + if (requests.empty()) + return; + if (!read_requests_queue.push(std::move(requests))) throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); } @@ -905,7 +942,7 @@ Keeper4LWInfo KeeperDispatcher::getKeeper4LWInfo() const Keeper4LWInfo result = server->getPartiallyFilled4LWInfo(); { std::lock_guard lock(push_request_mutex); - result.outstanding_requests_count = requests_queue->size(); + result.outstanding_requests_count = active_requests_queue->size(); } { std::lock_guard lock(session_to_response_callback_mutex); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 6531031f458..851f0165ae3 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -32,7 +32,8 @@ private: using UpdateConfigurationQueue = ConcurrentBoundedQueue; /// Size depends on coordination settings - std::unique_ptr requests_queue; + /// Request currently being processed + std::unique_ptr active_requests_queue; ResponsesQueue responses_queue; SnapshotsQueue snapshots_queue{1}; ConcurrentBoundedQueue read_requests_queue; @@ -81,22 +82,29 @@ private: /// Counter for new session_id requests. std::atomic internal_session_id_counter{0}; + /// A read request needs to have at least the log it was the last committed log on the leader + /// at the time the request was being made. + /// If the node is stale, we need to wait to commit that log before doing local read requests to achieve + /// linearizability. std::unordered_map> leader_waiters; std::mutex leader_waiter_mutex; - // We can be actively processing one type of requests (either read or write) from a single session. - // If we receive a request of a type that is not currently being processed, we put it in the waiting queue. - // Also, we want to process them in ariving order, so if we have a different type in the queue, we cannot process that request - // but wait for all the previous requests to finish. - // E.g. READ -> WRITE -> READ, the last READ will go to the waiting queue even though we are currently processing the first READ - // because we have WRITE request before it that needs to be processed. + /// We can be actively processing one type of requests (either read or write) from a single session. + /// If we receive a request of a type that is not currently being processed, we put it in the waiting queue. + /// Also, we want to process them in ariving order, so if we have a different type in the queue, we cannot process that request + /// but wait for all the previous requests to finish. + /// E.g. 
READ -> WRITE -> READ, the last READ will go to the waiting queue even though we are currently processing the first READ + /// because we have WRITE request before it that needs to be processed. struct UnprocessedRequests { + /// how many requests are currently in the active request queue size_t unprocessed_num{0}; + /// is_read currently being processed bool is_read{false}; std::list request_queue; }; + // Called every time a batch of reqeusts are processed. void finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions); std::unordered_map unprocessed_requests_for_session; @@ -145,6 +153,7 @@ public: return server && server->checkInit(); } + /// Called when a single log with request is committed. void onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx); /// Is server accepting requests, i.e. connected to the cluster diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index c9f9d6ab31d..7601881b326 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -114,7 +114,7 @@ KeeperServer::KeeperServer( , keeper_context{std::make_shared()} , create_snapshot_on_exit(config.getBool("keeper_server.create_snapshot_on_exit", true)) { - if (coordination_settings->quorum_reads) + if (coordination_settings->quorum_reads || coordination_settings->read_mode.toString() == "quorum") LOG_WARNING(log, "Quorum reads enabled, Keeper will work slower."); keeper_context->digest_enabled = config.getBool("keeper_server.digest_enabled", false); @@ -287,7 +287,7 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co params.leadership_expiry_ = params.election_timeout_lower_bound_; else if (params.leadership_expiry_ > params.election_timeout_lower_bound_) { - LOG_WARNING(log, "To use fast linearizable reads, leadership_expiry should be set to a value thath is less or equal to the election_timeout_upper_bound_ms. " + LOG_WARNING(log, "To use fast linearizable reads, leadership_expiry should be set to a value that is less or equal to the election_timeout_upper_bound_ms. 
" "Based on current settings, there are no guarantees for linearizability of reads."); } } diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index c4486918dd2..dc52ad019cd 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -219,7 +219,7 @@ void KeeperStateMachine::preprocess(const KeeperStorage::RequestForSession & req assertDigest(*request_for_session.digest, storage->getNodesDigest(false), *request_for_session.request, false); } -nuraft::ptr KeeperStateMachine::commit_ext(const ext_op_params& params) +nuraft::ptr KeeperStateMachine::commit_ext(const ext_op_params & params) { auto request_for_session = parseRequest(*params.data); if (!request_for_session.zxid) diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index 4a4c887ba1b..de8f30437c0 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -40,7 +40,7 @@ public: nuraft::ptr pre_commit(uint64_t log_idx, nuraft::buffer & data) override; - nuraft::ptr commit_ext(const ext_op_params& params) override; /// NOLINT + nuraft::ptr commit_ext(const ext_op_params & params) override; /// NOLINT /// Save new cluster config to our snapshot (copy of the config stored in StateManager) void commit_config(const uint64_t log_idx, nuraft::ptr & new_conf) override; /// NOLINT @@ -148,6 +148,7 @@ private: /// Special part of ACL system -- superdigest specified in server config. const std::string superdigest; + // call when a request is committed const CommitCallback commit_callback; KeeperContextPtr keeper_context; diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj index c354e36e430..9e85b37dd75 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/db.clj @@ -98,7 +98,6 @@ #"\{srv2\}" (get nodes 1) #"\{srv3\}" (get nodes 2) #"\{id\}" (str (inc (.indexOf nodes node))) - #"\{quorum_reads\}" (str (boolean (:quorum test))) #"\{snapshot_distance\}" (str (:snapshot-distance test)) #"\{stale_log_gap\}" (str (:stale-log-gap test)) #"\{reserved_log_items\}" (str (:reserved-log-items test))}] diff --git a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj index cd1aa540e45..1919c8ce3ec 100644 --- a/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj +++ b/tests/jepsen.clickhouse-keeper/src/jepsen/clickhouse_keeper/main.clj @@ -103,7 +103,7 @@ current-nemesis (get custom-nemesis/custom-nemesises (:nemesis opts))] (merge tests/noop-test opts - {:name (str "clickhouse-keeper-quorum=" quorum "-" (name (:workload opts)) "-" (name (:nemesis opts))) + {:name (str "clickhouse-keeper-" (name (:workload opts)) "-" (name (:nemesis opts))) :os ubuntu/os :db (get-db opts) :pure-generators true From 9acb2533341c72a42041717528f414bef5a24ff8 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 1 Sep 2022 08:44:20 +0000 Subject: [PATCH 35/74] Update NuRaft --- contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index dc63ab3cdc1..362a8442dad 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit dc63ab3cdc1233ae6d6f343c42e52a9fc50b9e39 +Subproject commit 362a8442dadcd230febfa7f19b86c41765c45575 From 
fa4c7259f9b991f983dc90ff5c752a9c83bd4272 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 29 Aug 2022 13:56:51 +0200 Subject: [PATCH 36/74] Fix typos --- src/Coordination/CoordinationSettings.cpp | 2 +- src/Coordination/CoordinationSettings.h | 2 +- src/Coordination/KeeperDispatcher.cpp | 2 +- src/Coordination/KeeperDispatcher.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Coordination/CoordinationSettings.cpp b/src/Coordination/CoordinationSettings.cpp index 3bc611809f4..f634bcbb281 100644 --- a/src/Coordination/CoordinationSettings.cpp +++ b/src/Coordination/CoordinationSettings.cpp @@ -190,7 +190,7 @@ KeeperConfigurationAndSettings::loadFromConfig(const Poco::Util::AbstractConfigu ret->coordination_settings->loadFromConfig("keeper_server.coordination_settings", config); if (ret->coordination_settings->quorum_reads) - LOG_WARNING(&Poco::Logger::get("KeeperConfigurationAndSettings"), "Setting 'quorum_reads' is depricated. Please use 'read_mode'"); + LOG_WARNING(&Poco::Logger::get("KeeperConfigurationAndSettings"), "Setting 'quorum_reads' is deprecated. Please use 'read_mode'"); return ret; } diff --git a/src/Coordination/CoordinationSettings.h b/src/Coordination/CoordinationSettings.h index 1a7cb7b11fb..971a0344e49 100644 --- a/src/Coordination/CoordinationSettings.h +++ b/src/Coordination/CoordinationSettings.h @@ -38,7 +38,7 @@ struct Settings; M(UInt64, stale_log_gap, 10000, "When node became stale and should receive snapshots from leader", 0) \ M(UInt64, fresh_log_gap, 200, "When node became fresh", 0) \ M(UInt64, max_requests_batch_size, 100, "Max size of batch in requests count before it will be sent to RAFT", 0) \ - M(Bool, quorum_reads, false, "Depricated - use read_mode. Execute read requests as writes through whole RAFT consesus with similar speed", 0) \ + M(Bool, quorum_reads, false, "Deprecated - use read_mode. Execute read requests as writes through whole RAFT consesus with similar speed", 0) \ M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \ M(Bool, compress_logs, true, "Write compressed coordination logs in ZSTD format", 0) \ M(Bool, compress_snapshots_with_zstd_format, true, "Write compressed snapshots in ZSTD format (instead of custom LZ4)", 0) \ diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 987fcb274ce..9b2f4e5f74e 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -45,7 +45,7 @@ KeeperDispatcher::KeeperDispatcher() /// - all requests from single session need to be processed in the order of their arrival /// /// Because of that, we cannot process read and write requests from SAME session at the same time. -/// To be able to process read and write reqeusts in parallel we need to make sure that only 1 type +/// To be able to process read and write requests in parallel we need to make sure that only 1 type /// of request is being processed from a single session. /// Multiple types from different sessions can be processed at the same time. /// diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 851f0165ae3..ae3334f17ec 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -104,7 +104,7 @@ private: std::list request_queue; }; - // Called every time a batch of reqeusts are processed. + // Called every time a batch of requests are processed. 
void finalizeRequests(const KeeperStorage::RequestsForSessions & requests_for_sessions); std::unordered_map unprocessed_requests_for_session; From 310e1484f6301097698a025d0346adce55ad80a4 Mon Sep 17 00:00:00 2001 From: Aleksandr Musorin Date: Thu, 1 Sep 2022 13:28:56 +0200 Subject: [PATCH 37/74] docs - updated optional parameters for table functions --- docs/en/sql-reference/table-functions/file.md | 2 +- docs/en/sql-reference/table-functions/s3.md | 2 +- docs/en/sql-reference/table-functions/s3Cluster.md | 2 +- docs/en/sql-reference/table-functions/url.md | 2 +- docs/ru/sql-reference/table-functions/file.md | 2 +- docs/ru/sql-reference/table-functions/s3.md | 2 +- docs/ru/sql-reference/table-functions/s3Cluster.md | 2 +- docs/ru/sql-reference/table-functions/url.md | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index a110bfbd15c..f40107aaaca 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -13,7 +13,7 @@ Creates a table from a file. This table function is similar to [url](../../sql-r **Syntax** ``` sql -file(path, format, structure) +file(path [,format] [,structure]) ``` **Parameters** diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 2df7d6e46b3..545037665bb 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -11,7 +11,7 @@ Provides table-like interface to select/insert files in [Amazon S3](https://aws. **Syntax** ``` sql -s3(path, [aws_access_key_id, aws_secret_access_key,] format, structure, [compression]) +s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` **Arguments** diff --git a/docs/en/sql-reference/table-functions/s3Cluster.md b/docs/en/sql-reference/table-functions/s3Cluster.md index ec6a73e4cbb..dab76ade780 100644 --- a/docs/en/sql-reference/table-functions/s3Cluster.md +++ b/docs/en/sql-reference/table-functions/s3Cluster.md @@ -11,7 +11,7 @@ Allows processing files from [Amazon S3](https://aws.amazon.com/s3/) in parallel **Syntax** ``` sql -s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure) +s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure]) ``` **Arguments** diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index f1ed7b4dfe4..014dc3ae853 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -13,7 +13,7 @@ sidebar_label: url **Syntax** ``` sql -url(URL, format, structure) +url(URL [,format] [,structure]) ``` **Parameters** diff --git a/docs/ru/sql-reference/table-functions/file.md b/docs/ru/sql-reference/table-functions/file.md index 1f262c9403a..df35a1c4ac0 100644 --- a/docs/ru/sql-reference/table-functions/file.md +++ b/docs/ru/sql-reference/table-functions/file.md @@ -13,7 +13,7 @@ sidebar_label: file **Синтаксис** ``` sql -file(path, format, structure) +file(path [,format] [,structure]) ``` **Параметры** diff --git a/docs/ru/sql-reference/table-functions/s3.md b/docs/ru/sql-reference/table-functions/s3.md index ae0419a4b84..14c8204fd1d 100644 --- a/docs/ru/sql-reference/table-functions/s3.md +++ b/docs/ru/sql-reference/table-functions/s3.md @@ -11,7 +11,7 @@ sidebar_label: s3 **Синтаксис** ``` sql -s3(path, [aws_access_key_id, 
aws_secret_access_key,] format, structure, [compression]) +s3(path [,aws_access_key_id, aws_secret_access_key] [,format] [,structure] [,compression]) ``` **Aргументы** diff --git a/docs/ru/sql-reference/table-functions/s3Cluster.md b/docs/ru/sql-reference/table-functions/s3Cluster.md index e6b317253c0..1c12913fabe 100644 --- a/docs/ru/sql-reference/table-functions/s3Cluster.md +++ b/docs/ru/sql-reference/table-functions/s3Cluster.md @@ -11,7 +11,7 @@ sidebar_label: s3Cluster **Синтаксис** ``` sql -s3Cluster(cluster_name, source, [access_key_id, secret_access_key,] format, structure) +s3Cluster(cluster_name, source, [,access_key_id, secret_access_key] [,format] [,structure]) ``` **Аргументы** diff --git a/docs/ru/sql-reference/table-functions/url.md b/docs/ru/sql-reference/table-functions/url.md index d4fb11b0de7..e5d9faeec00 100644 --- a/docs/ru/sql-reference/table-functions/url.md +++ b/docs/ru/sql-reference/table-functions/url.md @@ -13,7 +13,7 @@ sidebar_label: url **Синтаксис** ``` sql -url(URL, format, structure) +url(URL [,format] [,structure]) ``` **Параметры** From f0ba3ee92702ded969a2fe4b839b2ecdfde2c101 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Mon, 5 Sep 2022 09:51:02 +0200 Subject: [PATCH 38/74] Fix typo --- src/Coordination/KeeperDispatcher.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 9b2f4e5f74e..847aeb2e02e 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -53,7 +53,7 @@ KeeperDispatcher::KeeperDispatcher() /// When a request is received from a client, we check if there are requests being processed from that same /// session, and if yes, of what type. If the types are the same, and there are no requests of different /// type inbetetween, we can instanly add it to active request queue. Otherwise, we need to wait until -/// all requests of the other tpye are processed. +/// all requests of the other type are processed. /// /// There are multiple threads used for processing the request, each of them communicating with a queue. /// Assumption: only one type of request is being processed from a same session at any point in time (read or write). 
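The comment block retouched above spells out the dispatcher's session-level gating: only one kind of request (read or write) may be in flight per session, and parked requests must be released in arrival order. Below is a minimal, self-contained sketch of that bookkeeping. The names (SessionGate, onArrival, onFinalized, submitToActiveQueue) are hypothetical stand-ins; the real logic lives in KeeperDispatcher with ClickHouse's bounded queues and locking, so treat this as an illustration of the invariant, not the implementation.

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <unordered_map>

    struct Request { long session_id; bool is_read; };

    struct SessionGate
    {
        size_t in_flight = 0;        /// requests of the running phase already submitted
        bool reads_active = false;   /// which phase (read or write) is running
        std::deque<Request> waiting; /// parked requests, kept in arrival order
    };

    std::unordered_map<long, SessionGate> gates;

    void submitToActiveQueue(const Request & r)
    {
        std::cout << (r.is_read ? "READ" : "WRITE") << " submitted\n";
    }

    /// Admit a request only if nothing is parked and it matches the running
    /// phase; otherwise park it, so arrival order is never violated.
    void onArrival(const Request & r)
    {
        auto & g = gates[r.session_id];
        if (g.waiting.empty() && (g.in_flight == 0 || r.is_read == g.reads_active))
        {
            g.reads_active = r.is_read;
            ++g.in_flight;
            submitToActiveQueue(r);
        }
        else
            g.waiting.push_back(r);
    }

    /// Once the running phase drains, flip it and release the contiguous
    /// run of parked requests that belong to the new phase.
    void onFinalized(long session_id, size_t finished)
    {
        auto & g = gates[session_id];
        g.in_flight -= finished;
        if (g.in_flight != 0 || g.waiting.empty())
            return;
        g.reads_active = !g.reads_active;
        while (!g.waiting.empty() && g.waiting.front().is_read == g.reads_active)
        {
            ++g.in_flight;
            submitToActiveQueue(g.waiting.front());
            g.waiting.pop_front();
        }
    }

    int main()
    {
        /// The READ -> WRITE -> READ case from the comment: the second READ
        /// is parked even though a READ phase is running, because a WRITE
        /// arrived before it.
        onArrival({1, true});
        onArrival({1, false});
        onArrival({1, true});
        onFinalized(1, 1); /// first READ done -> WRITE released
        onFinalized(1, 1); /// WRITE done -> second READ released
    }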
From 951ee4422904eda4c31b14194305495437aebb58 Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Wed, 7 Sep 2022 11:51:56 +0000 Subject: [PATCH 39/74] Process read requests with snapshot install --- src/Coordination/KeeperDispatcher.cpp | 45 ++++++++++++++++++++++++- src/Coordination/KeeperDispatcher.h | 5 ++- src/Coordination/KeeperServer.cpp | 6 ++-- src/Coordination/KeeperServer.h | 3 +- src/Coordination/KeeperStateMachine.cpp | 5 ++- src/Coordination/KeeperStateMachine.h | 8 +++-- 6 files changed, 64 insertions(+), 8 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index 847aeb2e02e..40f5f539b4e 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -470,7 +471,15 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf read_request_thread = ThreadFromGlobalPool([this] { readRequestThread(); }); finalize_requests_thread = ThreadFromGlobalPool([this] { finalizeRequestsThread(); }); - server = std::make_unique(configuration_and_settings, config, responses_queue, snapshots_queue, [this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) { onRequestCommit(request_for_session, log_term, log_idx); }); + server = std::make_unique( + configuration_and_settings, + config, + responses_queue, + snapshots_queue, + [this](const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx) + { onRequestCommit(request_for_session, log_term, log_idx); }, + [this](uint64_t term, uint64_t last_idx) + { onApplySnapshot(term, last_idx); }); try { @@ -878,6 +887,40 @@ void KeeperDispatcher::onRequestCommit(const KeeperStorage::RequestForSession & throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); } +/// Process all read request that are waiting for lower or currently last processed log index +void KeeperDispatcher::onApplySnapshot(uint64_t term, uint64_t last_idx) +{ + KeeperServer::NodeInfo current_node_info{term, last_idx}; + KeeperStorage::RequestsForSessions requests; + { + std::lock_guard lock(leader_waiter_mutex); + for (auto leader_waiter_it = leader_waiters.begin(); leader_waiter_it != leader_waiters.end();) + { + auto waiting_node_info = leader_waiter_it->first; + if (waiting_node_info.term <= current_node_info.term + && waiting_node_info.last_committed_index <= current_node_info.last_committed_index) + { + for (auto & request : leader_waiter_it->second) + { + requests.push_back(std::move(request)); + } + + leader_waiter_it = leader_waiters.erase(leader_waiter_it); + } + else + { + ++leader_waiter_it; + } + } + } + + if (requests.empty()) + return; + + if (!read_requests_queue.push(std::move(requests))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); +} + bool KeeperDispatcher::isServerActive() const { return checkInit() && hasLeader() && !server->isRecovering(); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index ae3334f17ec..0ebe67a4f39 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -86,7 +86,7 @@ private: /// at the time the request was being made. /// If the node is stale, we need to wait to commit that log before doing local read requests to achieve /// linearizability. 
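    // In sketch form (simplified; the names come from the surrounding code,
    // but the control flow is abbreviated and error handling is omitted), a
    // stale replica parks the read keyed by the leader state it must reach:
    //
    //     if (local.term < leader.term
    //         || local.last_committed_index < leader.last_committed_index)
    //     {
    //         std::lock_guard lock(leader_waiter_mutex);
    //         leader_waiters[leader].push_back(std::move(request_info));
    //     }
    //     // otherwise this replica has caught up: serve the read locally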
- std::unordered_map> leader_waiters; + std::unordered_map leader_waiters; std::mutex leader_waiter_mutex; /// We can be actively processing one type of requests (either read or write) from a single session. @@ -156,6 +156,9 @@ public: /// Called when a single log with request is committed. void onRequestCommit(const KeeperStorage::RequestForSession & request_for_session, uint64_t log_term, uint64_t log_idx); + /// Called when a snapshot is applied + void onApplySnapshot(uint64_t term, uint64_t last_idx); + /// Is server accepting requests, i.e. connected to the cluster /// and achieved quorum bool isServerActive() const; diff --git a/src/Coordination/KeeperServer.cpp b/src/Coordination/KeeperServer.cpp index 4b6b3826834..4ff19f9df2e 100644 --- a/src/Coordination/KeeperServer.cpp +++ b/src/Coordination/KeeperServer.cpp @@ -106,7 +106,8 @@ KeeperServer::KeeperServer( const Poco::Util::AbstractConfiguration & config, ResponsesQueue & responses_queue_, SnapshotsQueue & snapshots_queue_, - KeeperStateMachine::CommitCallback commit_callback) + KeeperStateMachine::CommitCallback commit_callback, + KeeperStateMachine::ApplySnapshotCallback apply_snapshot_callback) : server_id(configuration_and_settings_->server_id) , coordination_settings(configuration_and_settings_->coordination_settings) , log(&Poco::Logger::get("KeeperServer")) @@ -127,7 +128,8 @@ KeeperServer::KeeperServer( coordination_settings, keeper_context, checkAndGetSuperdigest(configuration_and_settings_->super_digest), - std::move(commit_callback)); + std::move(commit_callback), + std::move(apply_snapshot_callback)); state_manager = nuraft::cs_new( server_id, diff --git a/src/Coordination/KeeperServer.h b/src/Coordination/KeeperServer.h index 7207af3e4a2..02ab643044a 100644 --- a/src/Coordination/KeeperServer.h +++ b/src/Coordination/KeeperServer.h @@ -72,7 +72,8 @@ public: const Poco::Util::AbstractConfiguration & config_, ResponsesQueue & responses_queue_, SnapshotsQueue & snapshots_queue_, - KeeperStateMachine::CommitCallback commit_callback); + KeeperStateMachine::CommitCallback commit_callback, + KeeperStateMachine::ApplySnapshotCallback apply_snapshot_callback); /// Load state machine from the latest snapshot and load log storage. Start NuRaft with required settings. 
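    // Rough contrast of the two hooks accepted by the constructor above
    // (a sketch, not the verbatim implementation):
    //   - commit_callback fires once per committed log entry, so a parked
    //     read can be matched against an exact (term, log index) key;
    //   - apply_snapshot_callback can jump over many log indices at once,
    //     so every waiter with term <= snapshot term and last committed
    //     index <= snapshot last index must be released in one sweep,
    //     which is what onApplySnapshot() above does.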
void startup(const Poco::Util::AbstractConfiguration & config, bool enable_ipv6 = true); diff --git a/src/Coordination/KeeperStateMachine.cpp b/src/Coordination/KeeperStateMachine.cpp index dc52ad019cd..8a002279096 100644 --- a/src/Coordination/KeeperStateMachine.cpp +++ b/src/Coordination/KeeperStateMachine.cpp @@ -45,7 +45,8 @@ KeeperStateMachine::KeeperStateMachine( const CoordinationSettingsPtr & coordination_settings_, const KeeperContextPtr & keeper_context_, const std::string & superdigest_, - CommitCallback commit_callback_) + CommitCallback commit_callback_, + ApplySnapshotCallback apply_snapshot_callback_) : coordination_settings(coordination_settings_) , snapshot_manager( snapshots_path_, @@ -60,6 +61,7 @@ KeeperStateMachine::KeeperStateMachine( , log(&Poco::Logger::get("KeeperStateMachine")) , superdigest(superdigest_) , commit_callback(std::move(commit_callback_)) + , apply_snapshot_callback(std::move(apply_snapshot_callback_)) , keeper_context(keeper_context_) { } @@ -303,6 +305,7 @@ bool KeeperStateMachine::apply_snapshot(nuraft::snapshot & s) ProfileEvents::increment(ProfileEvents::KeeperSnapshotApplys); last_committed_idx = s.get_last_log_idx(); + apply_snapshot_callback(s.get_last_log_term(), s.get_last_log_idx()); return true; } diff --git a/src/Coordination/KeeperStateMachine.h b/src/Coordination/KeeperStateMachine.h index de8f30437c0..7310eadd19a 100644 --- a/src/Coordination/KeeperStateMachine.h +++ b/src/Coordination/KeeperStateMachine.h @@ -21,6 +21,7 @@ class KeeperStateMachine : public nuraft::state_machine { public: using CommitCallback = std::function; + using ApplySnapshotCallback = std::function; KeeperStateMachine( ResponsesQueue & responses_queue_, @@ -29,7 +30,8 @@ public: const CoordinationSettingsPtr & coordination_settings_, const KeeperContextPtr & keeper_context_, const std::string & superdigest_ = "", - CommitCallback commit_callback_ = [](const KeeperStorage::RequestForSession &, uint64_t, uint64_t){}); + CommitCallback commit_callback_ = [](const KeeperStorage::RequestForSession &, uint64_t, uint64_t){}, + ApplySnapshotCallback apply_snapshot_callback_ = [](uint64_t, uint64_t){}); /// Read state from the latest snapshot void init(); @@ -148,8 +150,10 @@ private: /// Special part of ACL system -- superdigest specified in server config. 
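    // Note the defaulted callbacks in the constructor above: both fall back
    // to no-op lambdas, e.g. as declared there
    //     ApplySnapshotCallback apply_snapshot_callback_ = [](uint64_t, uint64_t) {};
    // so apply_snapshot() can invoke the hook unconditionally and the state
    // machine remains usable without a dispatcher wired in.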
const std::string superdigest; - // call when a request is committed + /// call when a request is committed const CommitCallback commit_callback; + /// call when snapshot is applied + const ApplySnapshotCallback apply_snapshot_callback; KeeperContextPtr keeper_context; }; From ced30a2072cdd098ca07f061161a8697b723203b Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Sep 2022 14:12:05 +0200 Subject: [PATCH 40/74] Switch to llvm/clang 15 It had been released few hours ago, and I want to check how clang-15 generates DWARF-5 Signed-off-by: Azat Khuzhin --- cmake/clang_tidy.cmake | 2 +- docker/packager/binary/Dockerfile | 19 ---------------- docker/packager/packager | 17 +++++++-------- docker/test/base/Dockerfile | 9 ++++---- docker/test/codebrowser/Dockerfile | 35 +++++++++++++++++++++++++----- docker/test/fuzzer/run-fuzzer.sh | 2 +- docker/test/keeper-jepsen/run.sh | 2 +- docker/test/util/Dockerfile | 4 +++- tests/ci/ci_config.py | 32 +++++++++++++-------------- 9 files changed, 64 insertions(+), 58 deletions(-) diff --git a/cmake/clang_tidy.cmake b/cmake/clang_tidy.cmake index fc25c68b11a..200282234ca 100644 --- a/cmake/clang_tidy.cmake +++ b/cmake/clang_tidy.cmake @@ -3,7 +3,7 @@ option (ENABLE_CLANG_TIDY "Use clang-tidy static analyzer" OFF) if (ENABLE_CLANG_TIDY) - find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12") + find_program (CLANG_TIDY_PATH NAMES "clang-tidy" "clang-tidy-15" "clang-tidy-14" "clang-tidy-13" "clang-tidy-12") if (CLANG_TIDY_PATH) message(STATUS diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index b9b0c5c2c6c..c4244504923 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -67,24 +67,5 @@ ENV GOCACHE=/workdir/ RUN mkdir /workdir && chmod 777 /workdir WORKDIR /workdir -# NOTE: thread sanitizer is broken in clang-14, we have to build it with clang-15 -# https://github.com/ClickHouse/ClickHouse/pull/39450 -# https://github.com/google/sanitizers/issues/1540 -# https://github.com/google/sanitizers/issues/1552 - -RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \ - && echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-15 main" >> \ - /etc/apt/sources.list.d/clang.list \ - && apt-get update \ - && apt-get install \ - clang-15 \ - llvm-15 \ - clang-tidy-15 \ - --yes --no-install-recommends \ - && apt-get clean - -# for external_symbolizer_path -RUN ln -s /usr/bin/llvm-symbolizer-15 /usr/bin/llvm-symbolizer - COPY build.sh / CMD ["bash", "-c", "/build.sh 2>&1"] diff --git a/docker/packager/packager b/docker/packager/packager index 591262959b4..9da787e9006 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -339,17 +339,16 @@ if __name__ == "__main__": parser.add_argument( "--compiler", choices=( - "clang-15", # For TSAN builds, see #39450 - "clang-14", - "clang-14-darwin", - "clang-14-darwin-aarch64", - "clang-14-aarch64", - "clang-14-ppc64le", - "clang-14-amd64sse2", - "clang-14-freebsd", + "clang-15", + "clang-15-darwin", + "clang-15-darwin-aarch64", + "clang-15-aarch64", + "clang-15-ppc64le", + "clang-15-amd64sse2", + "clang-15-freebsd", "gcc-11", ), - default="clang-14", + default="clang-15", help="a compiler to use", ) parser.add_argument( diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index 43cfca1fdfc..4e42fce1a1d 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -16,11 +16,10 @@ RUN apt-get update \ # and 
MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB). # TSAN will flush shadow memory when reaching this limit. # It may cause false-negatives, but it's better than OOM. -RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7 memory_limit_mb=46080'" >> /etc/environment; \ - echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \ - echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment; \ - echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment; \ - ln -s /usr/lib/llvm-${LLVM_VERSION}/bin/llvm-symbolizer /usr/bin/llvm-symbolizer; +RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7 memory_limit_mb=46080'" >> /etc/environment +RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment +RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment +RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment # Sanitizer options for current shell (not current, but the one that will be spawned on "docker run") # (but w/o verbosity for TSAN, otherwise test.reference will not match) ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080' diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index c7aed618f6a..ceed93c3ac7 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -8,16 +8,41 @@ FROM clickhouse/binary-builder:$FROM_TAG ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -RUN apt-get update && apt-get --yes --allow-unauthenticated install clang-14 libllvm14 libclang-14-dev libmlir-14-dev +RUN apt-get update && apt-get --yes --allow-unauthenticated install libclang-${LLVM_VERSION}-dev libmlir-${LLVM_VERSION}-dev + +# libclang-15-dev does not contain proper symlink: +# +# This is what cmake will search for: +# +# # readlink -f /usr/lib/llvm-15/lib/libclang-15.so.1 +# /usr/lib/x86_64-linux-gnu/libclang-15.so.1 +# +# This is what exists: +# +# # ls -l /usr/lib/x86_64-linux-gnu/libclang-15* +# lrwxrwxrwx 1 root root 16 Sep 5 13:31 /usr/lib/x86_64-linux-gnu/libclang-15.so -> libclang-15.so.1 +# lrwxrwxrwx 1 root root 21 Sep 5 13:31 /usr/lib/x86_64-linux-gnu/libclang-15.so.15 -> libclang-15.so.15.0.0 +# -rw-r--r-- 1 root root 31835760 Sep 5 13:31 /usr/lib/x86_64-linux-gnu/libclang-15.so.15.0.0 +# +ARG TARGETARCH +RUN arch=${TARGETARCH:-amd64} \ + && case $arch in \ + amd64) rarch=x86_64 ;; \ + arm64) rarch=aarch64 ;; \ + *) exit 1 ;; \ + esac \ + && ln -rsf /usr/lib/$rarch-linux-gnu/libclang-15.so.15 /usr/lib/$rarch-linux-gnu/libclang-15.so.1 # repo versions doesn't work correctly with C++17 # also we push reports to s3, so we add index.html to subfolder urls # https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b # TODO: remove branch in a few weeks after merge, e.g. in May or June 2022 -RUN git clone https://github.com/ClickHouse-Extras/woboq_codebrowser --branch llvm-14 \ +# +# FIXME: update location of a repo +RUN git clone https://github.com/azat/woboq_codebrowser --branch llvm-15 \ && cd woboq_codebrowser \ - && cmake . -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-14 -DCMAKE_C_COMPILER=clang-14 \ - && make -j \ + && cmake . 
-G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \ + && ninja \ && cd .. \ && rm -rf woboq_codebrowser @@ -32,7 +57,7 @@ ENV SHA=nosha ENV DATA="https://s3.amazonaws.com/clickhouse-test-reports/codebrowser/data" CMD mkdir -p $BUILD_DIRECTORY && cd $BUILD_DIRECTORY && \ - cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-14 -DCMAKE_C_COMPILER=/usr/bin/clang-14 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ + cmake $SOURCE_DIRECTORY -DCMAKE_CXX_COMPILER=/usr/bin/clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=/usr/bin/clang-${LLVM_VERSION} -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_S3=0 && \ mkdir -p $HTML_RESULT_DIRECTORY && \ $CODEGEN -b $BUILD_DIRECTORY -a -o $HTML_RESULT_DIRECTORY -p ClickHouse:$SOURCE_DIRECTORY:$SHA -d $DATA | ts '%Y-%m-%d %H:%M:%S' && \ cp -r $STATIC_DATA $HTML_RESULT_DIRECTORY/ &&\ diff --git a/docker/test/fuzzer/run-fuzzer.sh b/docker/test/fuzzer/run-fuzzer.sh index 93e38260395..bab87865b42 100755 --- a/docker/test/fuzzer/run-fuzzer.sh +++ b/docker/test/fuzzer/run-fuzzer.sh @@ -19,7 +19,7 @@ stage=${stage:-} script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" echo "$script_dir" repo_dir=ch -BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_unsplitted_disable_False_binary"} +BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-15_debug_none_unsplitted_disable_False_binary"} BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"} function clone diff --git a/docker/test/keeper-jepsen/run.sh b/docker/test/keeper-jepsen/run.sh index c43e6b2c54d..adf99c029a9 100644 --- a/docker/test/keeper-jepsen/run.sh +++ b/docker/test/keeper-jepsen/run.sh @@ -2,7 +2,7 @@ set -euo pipefail -CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"} +CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-15_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"} CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""} diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index b891b71492c..57880bfc1d6 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -5,7 +5,7 @@ FROM ubuntu:20.04 ARG apt_archive="http://archive.ubuntu.com" RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list -ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=14 +ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=15 RUN apt-get update \ && apt-get install \ @@ -56,6 +56,8 @@ RUN apt-get update \ # This symlink required by gcc to find lld compiler RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld +# for external_symbolizer_path +RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer ARG CCACHE_VERSION=4.6.1 RUN mkdir /tmp/ccache \ diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index fa68d1982d2..142cecc0b19 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -8,7 +8,7 @@ BuildConfig = Dict[str, ConfValue] CI_CONFIG = { "build_config": { "package_release": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "", "package_type": "deb", @@ -19,7 
+19,7 @@ CI_CONFIG = { "with_coverage": False, }, "coverity": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "", "package_type": "coverity", @@ -29,7 +29,7 @@ CI_CONFIG = { "official": False, }, "package_aarch64": { - "compiler": "clang-14-aarch64", + "compiler": "clang-15-aarch64", "build_type": "", "sanitizer": "", "package_type": "deb", @@ -40,7 +40,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_asan": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "address", "package_type": "deb", @@ -49,7 +49,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_ubsan": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "undefined", "package_type": "deb", @@ -67,7 +67,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_msan": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "memory", "package_type": "deb", @@ -76,7 +76,7 @@ CI_CONFIG = { "with_coverage": False, }, "package_debug": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "debug", "sanitizer": "", "package_type": "deb", @@ -85,7 +85,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_release": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -94,7 +94,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_tidy": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "debug", "sanitizer": "", "package_type": "binary", @@ -104,7 +104,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_shared": { - "compiler": "clang-14", + "compiler": "clang-15", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -113,7 +113,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_darwin": { - "compiler": "clang-14-darwin", + "compiler": "clang-15-darwin", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -123,7 +123,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_aarch64": { - "compiler": "clang-14-aarch64", + "compiler": "clang-15-aarch64", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -132,7 +132,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_freebsd": { - "compiler": "clang-14-freebsd", + "compiler": "clang-15-freebsd", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -142,7 +142,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_darwin_aarch64": { - "compiler": "clang-14-darwin-aarch64", + "compiler": "clang-15-darwin-aarch64", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -152,7 +152,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_ppc64le": { - "compiler": "clang-14-ppc64le", + "compiler": "clang-15-ppc64le", "build_type": "", "sanitizer": "", "package_type": "binary", @@ -162,7 +162,7 @@ CI_CONFIG = { "with_coverage": False, }, "binary_amd64sse2": { - "compiler": "clang-14-amd64sse2", + "compiler": "clang-15-amd64sse2", "build_type": "", "sanitizer": "", "package_type": "binary", From 50cc4a6a1fb0ab7ca5773f7465e9252afb425deb Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 21:29:59 +0200 Subject: [PATCH 41/74] Update llvm to add ability to preserve debug symbols There are some issues with JIT, you will find details in a patch that disables one function for it, and it will be great to have an ability to get full backtrace for this problem during investigations. 
Refs: https://github.com/ClickHouse/llvm/pull/9 Signed-off-by: Azat Khuzhin --- contrib/llvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/llvm b/contrib/llvm index 20607e61728..0db5bf5bd24 160000 --- a/contrib/llvm +++ b/contrib/llvm @@ -1 +1 @@ -Subproject commit 20607e61728e97c969e536644c3c0c1bb1a50672 +Subproject commit 0db5bf5bd2452cd8f1283a1fcdc04845af705bfc From e1cfde8a8635ad683938a91623ca3d6ef47e70a0 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 21:33:15 +0200 Subject: [PATCH 42/74] Update librdkafka for clang-15 (do not set _POSIX_C_SOURCE on FreeBSD) Refs: https://github.com/ClickHouse/librdkafka/pull/7 Signed-off-by: Azat Khuzhin --- contrib/librdkafka | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/librdkafka b/contrib/librdkafka index ff32b4e9eea..6f3b483426a 160000 --- a/contrib/librdkafka +++ b/contrib/librdkafka @@ -1 +1 @@ -Subproject commit ff32b4e9eeafd0b276f010ee969179e4e9e6d0b2 +Subproject commit 6f3b483426a8c8ec950e27e446bec175cf8b553f From aed0897e778a5ef3a06fada7d8159fa10bc666c5 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 21:36:23 +0200 Subject: [PATCH 43/74] Update libcpuid for clang-15 (for snprintf()) CI: https://s3.amazonaws.com/clickhouse-builds/41046/0e9265ad951d40cdce3716fb8a679360b2e0c156/binary_freebsd/build_log.log Refs: https://github.com/ClickHouse/libcpuid/pull/2 Signed-off-by: Azat Khuzhin --- contrib/libcpuid | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libcpuid b/contrib/libcpuid index 8db3b8d2d32..503083acb77 160000 --- a/contrib/libcpuid +++ b/contrib/libcpuid @@ -1 +1 @@ -Subproject commit 8db3b8d2d32d22437f063ce692a1b9bb15e42d18 +Subproject commit 503083acb77edf9fbce22a05826307dff2ce96e6 From 32d9bb5a7e090a11cc3eb882a6f13c737882bd82 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 21:34:15 +0200 Subject: [PATCH 44/74] Update sentry for clang-15 (to fix __msan_unpoison()) CI: https://s3.amazonaws.com/clickhouse-builds/41046/0e9265ad951d40cdce3716fb8a679360b2e0c156/package_msan/build_log.log Refs: https://github.com/ClickHouse/sentry-native/pull/5 Signed-off-by: Azat Khuzhin --- contrib/sentry-native | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/sentry-native b/contrib/sentry-native index f431047ac8d..ae10fb8c224 160000 --- a/contrib/sentry-native +++ b/contrib/sentry-native @@ -1 +1 @@ -Subproject commit f431047ac8da13179c488018dddf1c0d0771a997 +Subproject commit ae10fb8c224c3f41571446e1ed7fd57b9e5e366b From 64bb4904606ece2dbbcb55799161f7c85fbafa3a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Sep 2022 21:51:47 +0200 Subject: [PATCH 45/74] Add a note about why sentry turned off for musl Signed-off-by: Azat Khuzhin --- cmake/target.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/target.cmake b/cmake/target.cmake index 0fb5e8a20de..ae360758701 100644 --- a/cmake/target.cmake +++ b/cmake/target.cmake @@ -45,6 +45,7 @@ if (CMAKE_CROSSCOMPILING) endif () if (USE_MUSL) + # use of undeclared identifier 'PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP' set (ENABLE_SENTRY OFF CACHE INTERNAL "") set (ENABLE_ODBC OFF CACHE INTERNAL "") set (ENABLE_GRPC OFF CACHE INTERNAL "") From 95b3a02779f6a2b610ec02ce1dcc346c27c6242d Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Sep 2022 21:42:19 +0200 Subject: [PATCH 46/74] Fix strerror_r() configuration for jemalloc with musl under clang-15 Build error [1]: Sep 06 18:40:53 FAILED: 
contrib/jemalloc-cmake/CMakeFiles/_jemalloc.dir/__/jemalloc/src/malloc_io.c.o Sep 06 18:40:53 /usr/bin/ccache /usr/bin/clang-15 --target=x86_64-linux-musl --sysroot=/build/cmake/linux/../../contrib/sysroot/linux-x86_64-musl -DHAS_RESERVED_IDENTIFIER -DJEMALLOC_NO_PRIVATE_NAMESPACE -DJEMALLOC_PROF=1 -DJEMALLOC_PROF_LIBGCC=1 -DSTD_EXCEPTION_HAS_STACK_TRACE=1 -DUSE_MUSL=1 -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -D_LIBCPP_HAS_MUSL_LIBC=1 -I../contrib/jemalloc/include -isystem ../contrib/jemalloc-cmake/include -isystem contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal -isystem ../contrib/libcxx/include -isystem ../contrib/libcxxabi/include -isystem ../contrib/libunwind/include --gcc-toolchain=/build/cmake/linux/../../contrib/sysroot/linux-x86_64-musl --gcc-toolchain=/build/cmake/linux/../../contrib/sysroot/linux-x86_64-musl -fdiagnostics-color=always -Xclang -fuse-ctor-homing -gdwarf-aranges -pipe -mssse3 -msse4.1 -msse4.2 -mpclmul -mpopcnt -fasynchronous-unwind-tables -ffile-prefix-map=/build=. -falign-functions=32 -mbranches-within-32B-boundaries -fdiagnostics-absolute-paths -fexperimental-new-pass-manager -w -O2 -g -DNDEBUG -O3 -g -gdwarf-4 -flto=thin -fwhole-program-vtables -fno-pie -D OS_LINUX -D_GNU_SOURCE -Werror -std=gnu11 -MD -MT contrib/jemalloc-cmake/CMakeFiles/_jemalloc.dir/__/jemalloc/src/malloc_io.c.o -MF contrib/jemalloc-cmake/CMakeFiles/_jemalloc.dir/__/jemalloc/src/malloc_io.c.o.d -o contrib/jemalloc-cmake/CMakeFiles/_jemalloc.dir/__/jemalloc/src/malloc_io.c.o -c ../contrib/jemalloc/src/malloc_io.c Sep 06 18:40:53 /build/contrib/jemalloc/src/malloc_io.c:100:8: error: incompatible integer to pointer conversion initializing 'char *' with an expression of type 'int' [-Wint-conversion] Sep 06 18:40:53 char *b = strerror_r(err, buf, buflen); Sep 06 18:40:53 ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sep 06 18:40:53 1 error generated. [1]: https://s3.amazonaws.com/clickhouse-builds/41046/0e9265ad951d40cdce3716fb8a679360b2e0c156/package_release/build_log.log Signed-off-by: Azat Khuzhin --- .../jemalloc/internal/jemalloc_internal_defs.h.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in index ff97d297d8f..e08a2bed2ec 100644 --- a/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in +++ b/contrib/jemalloc-cmake/include_linux_x86_64_musl/jemalloc/internal/jemalloc_internal_defs.h.in @@ -415,7 +415,7 @@ /* * Defined if strerror_r returns char * if _GNU_SOURCE is defined. */ -#define JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE +/* #undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE */ /* Performs additional safety checks when defined. 
*/ /* #undef JEMALLOC_OPT_SAFETY_CHECKS */ From 0bb835c66e1e7c769252e03f55b9f2d5fedb1043 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Sep 2022 21:35:12 +0200 Subject: [PATCH 47/74] Add a workaround for clang-15 in libpq for strerror_r() Signed-off-by: Azat Khuzhin --- contrib/libpq-cmake/CMakeLists.txt | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/contrib/libpq-cmake/CMakeLists.txt b/contrib/libpq-cmake/CMakeLists.txt index 280c0381393..b62186159f3 100644 --- a/contrib/libpq-cmake/CMakeLists.txt +++ b/contrib/libpq-cmake/CMakeLists.txt @@ -63,6 +63,13 @@ target_include_directories (_libpq SYSTEM PUBLIC ${LIBPQ_SOURCE_DIR}) target_include_directories (_libpq SYSTEM PUBLIC "${LIBPQ_SOURCE_DIR}/include") target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs") +# NOTE: this is a dirty hack to avoid and instead pg_config.h should be shipped +# for different OS'es like for jemalloc, not one generic for all OS'es like +# now. +if (OS_DARWIN OR USE_MUSL) + target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1) +endif() + target_link_libraries (_libpq PRIVATE OpenSSL::SSL) add_library(ch_contrib::libpq ALIAS _libpq) From 38c0b0ee50b79bcc7aea095a9e34bb14953db4af Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 6 Sep 2022 22:42:32 +0200 Subject: [PATCH 48/74] Add a workaround for krb5 and musl for strerror_r() Signed-off-by: Azat Khuzhin --- contrib/krb5-cmake/autoconf_linux.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/krb5-cmake/autoconf_linux.h b/contrib/krb5-cmake/autoconf_linux.h index 7b71d962d9a..54951f866a5 100644 --- a/contrib/krb5-cmake/autoconf_linux.h +++ b/contrib/krb5-cmake/autoconf_linux.h @@ -440,7 +440,9 @@ #define HAVE_STRERROR 1 /* Define to 1 if you have the `strerror_r' function. */ +#ifndef USE_MUSL #define HAVE_STRERROR_R 1 +#endif /* Define to 1 if you have the header file. */ #define HAVE_STRINGS_H 1 From 3c47d5bbffbfcac070f316cc9183bab82a9f437e Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Sep 2022 22:04:24 +0200 Subject: [PATCH 49/74] Add a workaround for strerror_r() libpq for FreeBSD (for clang-15) Signed-off-by: Azat Khuzhin --- contrib/libpq-cmake/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/libpq-cmake/CMakeLists.txt b/contrib/libpq-cmake/CMakeLists.txt index b62186159f3..91326422b43 100644 --- a/contrib/libpq-cmake/CMakeLists.txt +++ b/contrib/libpq-cmake/CMakeLists.txt @@ -66,7 +66,7 @@ target_include_directories (_libpq SYSTEM PRIVATE "${LIBPQ_SOURCE_DIR}/configs") # NOTE: this is a dirty hack to avoid and instead pg_config.h should be shipped # for different OS'es like for jemalloc, not one generic for all OS'es like # now. 
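# For context on this and the neighbouring strerror_r patches (a sketch of
# the two prototypes, not part of this change): with glibc and _GNU_SOURCE
# the GNU variant is exposed,
#     char * strerror_r(int errnum, char * buf, size_t buflen);
# while musl, Darwin and FreeBSD only ship the XSI variant,
#     int strerror_r(int errnum, char * buf, size_t buflen);
# so code written against one signature fails to compile (or misbehaves)
# against the other, which is why the right flavour is forced per platform.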
-if (OS_DARWIN OR USE_MUSL) +if (OS_DARWIN OR OS_FREEBSD OR USE_MUSL) target_compile_definitions(_libpq PRIVATE -DSTRERROR_R_INT=1) endif() From e8d7403a38706f2d66e2840883505ff02ab3b9ae Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Sep 2022 22:15:49 +0200 Subject: [PATCH 50/74] Suppress warning in FormatFactory::getFormatFromFileDescriptor() for FreeBSD Signed-off-by: Azat Khuzhin --- src/Formats/FormatFactory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Formats/FormatFactory.cpp b/src/Formats/FormatFactory.cpp index 780b6bb6201..7f04f748eab 100644 --- a/src/Formats/FormatFactory.cpp +++ b/src/Formats/FormatFactory.cpp @@ -521,6 +521,7 @@ String FormatFactory::getFormatFromFileDescriptor(int fd) return getFormatFromFileName(file_path, false); return ""; #else + (void)fd; return ""; #endif } From e0a8f19cfb9b78d015eb58d79c2b58099c7af9a5 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 7 Sep 2022 22:16:15 +0200 Subject: [PATCH 51/74] Fix -Wzero-as-null-pointer-constant in MemoryStatisticsOS::get for FreeBSD Signed-off-by: Azat Khuzhin --- src/Common/MemoryStatisticsOS.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Common/MemoryStatisticsOS.cpp b/src/Common/MemoryStatisticsOS.cpp index 22f8446121f..f2d2ab5fea9 100644 --- a/src/Common/MemoryStatisticsOS.cpp +++ b/src/Common/MemoryStatisticsOS.cpp @@ -135,7 +135,7 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const struct kinfo_proc kp; size_t len = sizeof(struct kinfo_proc); - if (-1 == ::sysctl(mib, 4, &kp, &len, NULL, 0)) + if (-1 == ::sysctl(mib, 4, &kp, &len, nullptr, 0)) throwFromErrno("Cannot sysctl(kern.proc.pid." + std::to_string(self) + ")", ErrorCodes::SYSTEM_ERROR); if (sizeof(struct kinfo_proc) != len) From ba4174402a47512c298e4f3f7804cc772f2802a6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Thu, 8 Sep 2022 18:01:02 +0200 Subject: [PATCH 52/74] Temporary disable complation of compileAddIntoAggregateStatesFunctions Fixes the following use-of-uninitialized-value in llvm [1]: ==696==WARNING: MemorySanitizer: use-of-uninitialized-value 0 0x498141d9 in llvm::ilist_traits::removeNodeFromList(llvm::MachineInstr*) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineBasicBlock.cpp:155:24 1 0x498141d9 in llvm::iplist_impl>, llvm::ilist_traits>::remove(llvm::ilist_iterator, false, false>&) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:253:11 2 0x498141d9 in llvm::iplist_impl>, llvm::ilist_traits>::erase(llvm::ilist_iterator, false, false>) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:268:22 3 0x498141d9 in llvm::iplist_impl>, llvm::ilist_traits>::erase(llvm::ilist_iterator, false, false>, llvm::ilist_iterator, false, false>) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:305:15 4 0x498141d9 in llvm::iplist_impl>, llvm::ilist_traits>::clear() build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:309:18 5 0x498141d9 in llvm::iplist_impl>, llvm::ilist_traits>::~iplist_impl() build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:210:20 6 0x498145ff in llvm::MachineBasicBlock::~MachineBasicBlock() build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineBasicBlock.cpp:56:1 7 0x49969c1f in llvm::MachineFunction::DeleteMachineBasicBlock(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunction.cpp:426:8 8 0x49969c1f in llvm::ilist_alloc_traits::deleteNode(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunction.cpp:127:21 9 0x4983f4d6 in llvm::iplist_impl, 
llvm::ilist_traits>::erase(llvm::ilist_iterator, false, false>) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:268:11 10 0x4983f4d6 in llvm::iplist_impl, llvm::ilist_traits>::erase(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:272:39 11 0x4983f4d6 in llvm::MachineFunction::erase(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/include/llvm/CodeGen/MachineFunction.h:767:53 12 0x4983f4d6 in llvm::MachineBasicBlock::eraseFromParent() build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineBasicBlock.cpp:1317:16 13 0x4a0c9a27 in llvm::TailDuplicator::removeDeadBlock(llvm::MachineBasicBlock*, llvm::function_ref*) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:1051:8 14 0x4a0c1e41 in llvm::TailDuplicator::tailDuplicateAndUpdate(bool, llvm::MachineBasicBlock*, llvm::MachineBasicBlock*, llvm::SmallVectorImpl*, llvm::function_ref*, llvm::SmallVectorImpl*) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:189:5 15 0x4a0ca16e in llvm::TailDuplicator::tailDuplicateBlocks() build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:288:19 16 0x4a0be9f9 in (anonymous namespace)::TailDuplicateBase::runOnMachineFunction(llvm::MachineFunction&) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplication.cpp:98:21 17 0x499a2777 in llvm::MachineFunctionPass::runOnFunction(llvm::Function&) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunctionPass.cpp:72:13 18 0x4dbba34d in llvm::FPPassManager::runOnFunction(llvm::Function&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1435:27 19 0x4dbe3761 in llvm::FPPassManager::runOnModule(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1481:16 20 0x4dbbebbb in (anonymous namespace)::MPPassManager::runOnModule(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1550:27 21 0x4dbbebbb in llvm::legacy::PassManagerImpl::run(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:541:44 22 0x4dbe454b in llvm::legacy::PassManager::run(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1677:14 23 0x414405df in DB::JITCompiler::compile(llvm::Module&) build_docker/../src/Interpreters/JIT/CHJIT.cpp:78:22 24 0x4143bb7d in DB::CHJIT::compileModule(std::__1::unique_ptr>) build_docker/../src/Interpreters/JIT/CHJIT.cpp:378:29 25 0x4143aded in DB::CHJIT::compileModule(std::__1::function) build_docker/../src/Interpreters/JIT/CHJIT.cpp:359:24 26 0x4147b25e in DB::compileAggregateFunctions(DB::CHJIT&, std::__1::vector> const&, std::__1::basic_string, std::__1::allocator>) build_docker/../src/Interpreters/JIT/compileFunction.cpp:738:32 27 0x3de0a23a in DB::Aggregator::compileAggregateFunctionsIfNeeded()::$_0::operator()() const build_docker/../src/Interpreters/Aggregator.cpp:661:49 28 0x3de0a23a in std::__1::pair, bool> DB::CacheBase, DB::CompiledExpressionCacheEntry, UInt128Hash, DB::CompiledFunctionWeightFunction>::getOrSet(wide::integer<128ul, unsigned int> const&, DB::Aggregator::compileAggregateFunctionsIfNeeded()::$_0&&) build_docker/../src/Common/CacheBase.h:125:24 29 0x3de0a23a in DB::Aggregator::compileAggregateFunctionsIfNeeded() build_docker/../src/Interpreters/Aggregator.cpp:657:70 Memory was marked as uninitialized 0 0xb988ded in __sanitizer_dtor_callback (/usr/bin/clickhouse+0xb988ded) (BuildId: c4a880b742797a1f37bc4f5ed869f055cc86486b) 1 0x498145da in llvm::MachineBasicBlock::~MachineBasicBlock() 
build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineBasicBlock.cpp:56:1 2 0x49969c1f in llvm::MachineFunction::DeleteMachineBasicBlock(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunction.cpp:426:8 3 0x49969c1f in llvm::ilist_alloc_traits::deleteNode(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunction.cpp:127:21 4 0x4983f4d6 in llvm::iplist_impl, llvm::ilist_traits>::erase(llvm::ilist_iterator, false, false>) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:268:11 5 0x4983f4d6 in llvm::iplist_impl, llvm::ilist_traits>::erase(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/include/llvm/ADT/ilist.h:272:39 6 0x4983f4d6 in llvm::MachineFunction::erase(llvm::MachineBasicBlock*) build_docker/../contrib/llvm/llvm/include/llvm/CodeGen/MachineFunction.h:767:53 7 0x4983f4d6 in llvm::MachineBasicBlock::eraseFromParent() build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineBasicBlock.cpp:1317:16 8 0x4a0c9a27 in llvm::TailDuplicator::removeDeadBlock(llvm::MachineBasicBlock*, llvm::function_ref*) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:1051:8 9 0x4a0c1e41 in llvm::TailDuplicator::tailDuplicateAndUpdate(bool, llvm::MachineBasicBlock*, llvm::MachineBasicBlock*, llvm::SmallVectorImpl*, llvm::function_ref*, llvm::SmallVectorImpl*) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:189:5 10 0x4a0ca16e in llvm::TailDuplicator::tailDuplicateBlocks() build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplicator.cpp:288:19 11 0x4a0be9f9 in (anonymous namespace)::TailDuplicateBase::runOnMachineFunction(llvm::MachineFunction&) build_docker/../contrib/llvm/llvm/lib/CodeGen/TailDuplication.cpp:98:21 12 0x499a2777 in llvm::MachineFunctionPass::runOnFunction(llvm::Function&) build_docker/../contrib/llvm/llvm/lib/CodeGen/MachineFunctionPass.cpp:72:13 13 0x4dbba34d in llvm::FPPassManager::runOnFunction(llvm::Function&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1435:27 14 0x4dbe3761 in llvm::FPPassManager::runOnModule(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1481:16 15 0x4dbbebbb in (anonymous namespace)::MPPassManager::runOnModule(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1550:27 16 0x4dbbebbb in llvm::legacy::PassManagerImpl::run(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:541:44 17 0x4dbe454b in llvm::legacy::PassManager::run(llvm::Module&) build_docker/../contrib/llvm/llvm/lib/IR/LegacyPassManager.cpp:1677:14 18 0x414405df in DB::JITCompiler::compile(llvm::Module&) build_docker/../src/Interpreters/JIT/CHJIT.cpp:78:22 19 0x4143bb7d in DB::CHJIT::compileModule(std::__1::unique_ptr>) build_docker/../src/Interpreters/JIT/CHJIT.cpp:378:29 20 0x4143aded in DB::CHJIT::compileModule(std::__1::function) build_docker/../src/Interpreters/JIT/CHJIT.cpp:359:24 21 0x4147b25e in DB::compileAggregateFunctions(DB::CHJIT&, std::__1::vector> const&, std::__1::basic_string, std::__1::allocator>) build_docker/../src/Interpreters/JIT/compileFunction.cpp:738:32 22 0x3de0a23a in DB::Aggregator::compileAggregateFunctionsIfNeeded()::$_0::operator()() const build_docker/../src/Interpreters/Aggregator.cpp:661:49 23 0x3de0a23a in std::__1::pair, bool> DB::CacheBase, DB::CompiledExpressionCacheEntry, UInt128Hash, DB::CompiledFunctionWeightFunction>::getOrSet(wide::integer<128ul, unsigned int> const&, DB::Aggregator::compileAggregateFunctionsIfNeeded()::$_0&&) 
build_docker/../src/Common/CacheBase.h:125:24 24 0x3de0a23a in DB::Aggregator::compileAggregateFunctionsIfNeeded() build_docker/../src/Interpreters/Aggregator.cpp:657:70 [1]: https://s3.amazonaws.com/clickhouse-test-reports/41046/490a2c75610c4bc3191d55226f8454b3c3d3919a/stateful_tests__msan_.html Note, that it is safe to do, but only for this method, since it had been disabled anyway, back in #27574, and I guess this MSan report may be related. Signed-off-by: Azat Khuzhin --- src/Interpreters/JIT/compileFunction.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/JIT/compileFunction.cpp b/src/Interpreters/JIT/compileFunction.cpp index 353ab84674c..99646084e5a 100644 --- a/src/Interpreters/JIT/compileFunction.cpp +++ b/src/Interpreters/JIT/compileFunction.cpp @@ -739,7 +739,10 @@ CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vec { compileCreateAggregateStatesFunctions(module, functions, create_aggregate_states_functions_name); compileAddIntoAggregateStatesFunctions(module, functions, add_aggregate_states_functions_name); - compileAddIntoAggregateStatesFunctionsSinglePlace(module, functions, add_aggregate_states_functions_name_single_place); + /// FIXME: this leads to use-of-uninitialized-value in llvm + /// But for now, it is safe, since it is not used by Aggregator anyway + (void)compileAddIntoAggregateStatesFunctionsSinglePlace; + /// compileAddIntoAggregateStatesFunctionsSinglePlace(module, functions, add_aggregate_states_functions_name_single_place); compileMergeAggregatesStates(module, functions, merge_aggregate_states_functions_name); compileInsertAggregatesIntoResultColumns(module, functions, insert_aggregate_states_functions_name); }); @@ -752,7 +755,7 @@ CompiledAggregateFunctions compileAggregateFunctions(CHJIT & jit, const std::vec assert(create_aggregate_states_function); assert(add_into_aggregate_states_function); - assert(add_into_aggregate_states_function_single_place); + /// assert(add_into_aggregate_states_function_single_place); /// FIXME assert(merge_aggregate_states_function); assert(insert_aggregate_states_function); From 46eafb47329a7d718ea359e6b4091d8bd932358f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Fri, 9 Sep 2022 15:09:35 +0200 Subject: [PATCH 53/74] tests: skip test_send_crash_reports under ASan Since it also does not fits into timeouts [1]: 2022-09-08 21:16:20 [ 377 ] DEBUG : Command:['docker', 'exec', '-u', 'root', 'roottestsendcrashreports_node_1', 'bash', '-c', 'pkill -SEGV clickhouse'] (cluster.py:95, run_and_check) ... 2022-09-08 21:16:22 [ 377 ] DEBUG : run container_id:roottestsendcrashreports_node_1 detach:False nothrow:False cmd: ['cat', '/result.txt'] (cluster.py:1744, exec_in_container) ... 2022-09-08 21:16:36 [ 377 ] DEBUG : Stdout:INITIAL_STATE (cluster.py:103, run_and_check) And server logs: 2022.09.08 21:16:21.112076 [ 228 ] {} BaseDaemon: ######################################## 2022.09.08 21:16:21.112170 [ 228 ] {} BaseDaemon: (version 22.9.1.1, build id: 0F7336E0A4D64134C51C8365DADCB78A9B39AA3B) (from thread 1) (no query) Received signal Segmentation fault (11) 2022.09.08 21:16:21.112244 [ 228 ] {} BaseDaemon: Address: 0xde Access: read. Unknown si_code. 2022.09.08 21:16:21.112321 [ 228 ] {} BaseDaemon: Stack trace: 0x7fbe21d09376 0x40a4f71a 0xe293a4b 0xdc5c51a 0x38dee227 0xdc326a0 0x38e319d9 0xdc2b4e2 0xdc25fdb 0x7fbe21b2c083 0xdb636ae 2022.09.08 21:16:21.112419 [ 228 ] {} BaseDaemon: 3. pthread_cond_wait @ 0x7fbe21d09376 in ? 
2022.09.08 21:16:21.122914 [ 228 ] {} BaseDaemon: 4. ./build_docker/../contrib/libcxx/src/condition_variable.cpp:0: std::__1::condition_variable::wait(std::__1::unique_lock&) @ 0x40a4f71a in /usr/bin/clickhouse 2022.09.08 21:16:21.233016 [ 228 ] {} BaseDaemon: 5.1. inlined from ./build_docker/../contrib/libcxx/include/atomic:952: unsigned long std::__1::__cxx_atomic_load(std::__1::__cxx_atomic_base_impl const*, std::__1::memory_order) 2022.09.08 21:16:21.233135 [ 228 ] {} BaseDaemon: 5.2. inlined from ../contrib/libcxx/include/atomic:1582: std::__1::__atomic_base::load(std::__1::memory_order) const 2022.09.08 21:16:21.233183 [ 228 ] {} BaseDaemon: 5.3. inlined from ../contrib/libcxx/include/atomic:1586: std::__1::__atomic_base::operator unsigned long() const 2022.09.08 21:16:21.233234 [ 228 ] {} BaseDaemon: 5.4. inlined from ../src/Daemon/BaseDaemon.cpp:967: operator() 2022.09.08 21:16:21.233303 [ 228 ] {} BaseDaemon: 5.5. inlined from ../contrib/libcxx/include/__mutex_base:402: void std::__1::condition_variable::wait(std::__1::unique_lock&, BaseDaemon::waitForTerminationRequest()::$_0) 2022.09.08 21:16:21.233334 [ 228 ] {} BaseDaemon: 5. ../src/Daemon/BaseDaemon.cpp:967: BaseDaemon::waitForTerminationRequest() @ 0xe293a4b in /usr/bin/clickhouse 2022.09.08 21:16:21.350675 [ 228 ] {} BaseDaemon: 6. ./build_docker/../programs/server/Server.cpp:0: DB::Server::main(std::__1::vector, std::__1::allocator >, std::__1::allocator, std::__1::allocator > > > const&) @ 0xdc5c51a in /usr/bin/clickhouse 2022.09.08 21:16:21.394092 [ 228 ] {} BaseDaemon: 7. ./build_docker/../contrib/poco/Util/src/Application.cpp:0: Poco::Util::Application::run() @ 0x38dee227 in /usr/bin/clickhouse 2022.09.08 21:16:21.654195 [ 228 ] {} BaseDaemon: 8. ./build_docker/../programs/server/Server.cpp:466: DB::Server::run() @ 0xdc326a0 in /usr/bin/clickhouse 2022.09.08 21:16:21.666991 [ 228 ] {} BaseDaemon: 9. ./build_docker/../contrib/poco/Util/src/ServerApplication.cpp:0: Poco::Util::ServerApplication::run(int, char**) @ 0x38e319d9 in /usr/bin/clickhouse 2022.09.08 21:16:21.916078 [ 228 ] {} BaseDaemon: 10. ./build_docker/../programs/server/Server.cpp:0: mainEntryClickHouseServer(int, char**) @ 0xdc2b4e2 in /usr/bin/clickhouse 2022.09.08 21:16:21.929922 [ 228 ] {} BaseDaemon: 11. ./build_docker/../programs/main.cpp:0: main @ 0xdc25fdb in /usr/bin/clickhouse 2022.09.08 21:16:21.929981 [ 228 ] {} BaseDaemon: 12. __libc_start_main @ 0x7fbe21b2c083 in ? 2022.09.08 21:16:30.357032 [ 228 ] {} BaseDaemon: 13. _start @ 0xdb636ae in /usr/bin/clickhouse 2022.09.08 21:16:31.383233 [ 228 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. (calculated checksum: 6200AC7C1270DC293DF3302E1C64399B) ... 2022.09.08 21:16:40.564453 [ 228 ] {} SentryWriter: Sending crash report [1]: https://s3.amazonaws.com/clickhouse-test-reports/41046/a0b85eaca8d4003c9fbc4571b30830d30f1984e9/integration_tests__asan__[3/3].html Though another option is to increase waiting time. Signed-off-by: Azat Khuzhin --- tests/integration/test_send_crash_reports/test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_send_crash_reports/test.py b/tests/integration/test_send_crash_reports/test.py index 90a6c684de7..83c0827f891 100644 --- a/tests/integration/test_send_crash_reports/test.py +++ b/tests/integration/test_send_crash_reports/test.py @@ -36,8 +36,10 @@ def started_node(): def test_send_segfault(started_node): + # NOTE: another option is to increase waiting time. 
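    # Why the sanitizer builds are skipped rather than fixed (an inference from
    # the logs quoted above, not a claim from the patch itself): the fatal-signal
    # handler needs roughly twenty seconds between catching SIGSEGV (21:16:21)
    # and handing the report to SentryWriter (21:16:40), mostly spent printing
    # and symbolizing the stack trace, while the test's poll of /result.txt
    # still sees INITIAL_STATE at 21:16:36. Sanitizer builds symbolize even more
    # slowly, so they cannot fit into the same window.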
if ( started_node.is_built_with_thread_sanitizer() + or started_node.is_built_with_address_sanitizer() or started_node.is_built_with_memory_sanitizer() ): pytest.skip("doesn't fit in timeouts for stacktrace generation") From 4e3135383b7da4405133148eb1a6cfe5d0a697ae Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 09:58:49 +0200 Subject: [PATCH 54/74] Fix clang-tidy warnings (from clang-15) Signed-off-by: Azat Khuzhin --- .clang-tidy | 5 +++++ src/Backups/BackupCoordinationReplicatedTables.cpp | 1 + src/Common/ArrayCache.h | 2 -- src/Core/examples/coro.cpp | 2 +- src/IO/ReadBufferFromFileDescriptor.cpp | 2 +- src/IO/ReadBufferFromFileDescriptor.h | 2 +- src/Storages/HDFS/StorageHDFS.cpp | 4 ++-- src/Storages/HDFS/StorageHDFS.h | 4 ++-- 8 files changed, 13 insertions(+), 9 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 860e7b3189f..532b0f37b81 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -22,6 +22,8 @@ Checks: '*, -bugprone-implicit-widening-of-multiplication-result, -bugprone-narrowing-conversions, -bugprone-not-null-terminated-result, + -bugprone-unchecked-optional-access, + -bugprone-assignment-in-if-condition, -cert-dcl16-c, -cert-err58-cpp, @@ -103,6 +105,7 @@ Checks: '*, -misc-no-recursion, -misc-non-private-member-variables-in-classes, + -misc-const-correctness, -modernize-avoid-c-arrays, -modernize-concat-nested-namespaces, @@ -114,6 +117,7 @@ Checks: '*, -modernize-use-nodiscard, -modernize-use-override, -modernize-use-trailing-return-type, + -modernize-macro-to-enum, -performance-inefficient-string-concatenation, -performance-no-int-to-ptr, @@ -135,6 +139,7 @@ Checks: '*, -readability-suspicious-call-argument, -readability-uppercase-literal-suffix, -readability-use-anyofallof, + -readability-simplify-boolean-expr, -zirkon-*, ' diff --git a/src/Backups/BackupCoordinationReplicatedTables.cpp b/src/Backups/BackupCoordinationReplicatedTables.cpp index 27137edb008..910719b5365 100644 --- a/src/Backups/BackupCoordinationReplicatedTables.cpp +++ b/src/Backups/BackupCoordinationReplicatedTables.cpp @@ -248,6 +248,7 @@ BackupCoordinationReplicatedTables::getMutations(const String & table_shared_id, return {}; std::vector res; + res.reserve(table_info.mutations.size()); for (const auto & [mutation_id, mutation_entry] : table_info.mutations) res.emplace_back(MutationInfo{mutation_id, mutation_entry}); return res; diff --git a/src/Common/ArrayCache.h b/src/Common/ArrayCache.h index f01ff94e38b..79aeddb09df 100644 --- a/src/Common/ArrayCache.h +++ b/src/Common/ArrayCache.h @@ -722,5 +722,3 @@ public: return res; } }; - -template constexpr size_t ArrayCache::min_chunk_size; diff --git a/src/Core/examples/coro.cpp b/src/Core/examples/coro.cpp index 370820a228d..fbccc261e9d 100644 --- a/src/Core/examples/coro.cpp +++ b/src/Core/examples/coro.cpp @@ -14,7 +14,7 @@ namespace std // NOLINT(cert-dcl58-cpp) { - using namespace experimental::coroutines_v1; + using namespace experimental::coroutines_v1; // NOLINT(cert-dcl58-cpp) } #if __has_warning("-Wdeprecated-experimental-coroutine") diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index ffb7bff8afb..cb4b6ca5f3e 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -233,7 +233,7 @@ void ReadBufferFromFileDescriptor::rewind() /// Assuming file descriptor supports 'select', check that we have data to read or wait until timeout. 
-bool ReadBufferFromFileDescriptor::poll(size_t timeout_microseconds) +bool ReadBufferFromFileDescriptor::poll(size_t timeout_microseconds) const { fd_set fds; FD_ZERO(&fds); diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index 6b68b8b6dfd..6edda460bac 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -66,7 +66,7 @@ public: private: /// Assuming file descriptor supports 'select', check that we have data to read or wait until timeout. - bool poll(size_t timeout_microseconds); + bool poll(size_t timeout_microseconds) const; }; diff --git a/src/Storages/HDFS/StorageHDFS.cpp b/src/Storages/HDFS/StorageHDFS.cpp index f93bc45d1a3..45caddb21ea 100644 --- a/src/Storages/HDFS/StorageHDFS.cpp +++ b/src/Storages/HDFS/StorageHDFS.cpp @@ -255,7 +255,7 @@ private: class HDFSSource::URISIterator::Impl { public: - explicit Impl(const std::vector & uris_, ContextPtr context) + explicit Impl(const std::vector & uris_, ContextPtr context) { auto path_and_uri = getPathFromUriAndUriWithoutPath(uris_[0]); HDFSBuilderWrapper builder = createHDFSBuilder(path_and_uri.second + "/", context->getGlobalContext()->getConfigRef()); @@ -293,7 +293,7 @@ String HDFSSource::DisclosedGlobIterator::next() return pimpl->next(); } -HDFSSource::URISIterator::URISIterator(const std::vector & uris_, ContextPtr context) +HDFSSource::URISIterator::URISIterator(const std::vector & uris_, ContextPtr context) : pimpl(std::make_shared(uris_, context)) { } diff --git a/src/Storages/HDFS/StorageHDFS.h b/src/Storages/HDFS/StorageHDFS.h index a0d61f4bd2a..896371f9685 100644 --- a/src/Storages/HDFS/StorageHDFS.h +++ b/src/Storages/HDFS/StorageHDFS.h @@ -86,7 +86,7 @@ private: const String & format_name, const ContextPtr & ctx); - std::vector uris; + std::vector uris; String format_name; String compression_method; const bool distributed_processing; @@ -116,7 +116,7 @@ public: class URISIterator { public: - URISIterator(const std::vector & uris_, ContextPtr context); String next(); private: class Impl; From c1e70169d2eec449012ac1bb555347a840c9c181 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Sat, 10 Sep 2022 13:11:54 +0200 Subject: [PATCH 55/74] Suppress clang-analyzer-cplusplus.NewDelete in MsgPackRowInputFormat Apparently there is some issue with clang-15, since even the following example shows an error [1].
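Sketched from the clang-tidy trace quoted further below, the gist's repro is roughly of the following shape (an assumed reconstruction for illustration, not the gist verbatim; the msgpack::unpack() overload used here is the one visible in the trace):

    #include <msgpack.hpp>

    int main()
    {
        const char data[] = "\x01"; /// a single positive fixint
        std::size_t offset = 0;
        /// clang-15's analyzer follows the error branch inside unpack(), where a
        /// default-constructed msgpack::object_handle destroys its zone through a
        /// unique_ptr, and then flags the ::free() in cpp11_zone.hpp as freeing
        /// already-released memory, even though this path is never taken here.
        msgpack::object_handle handle = msgpack::unpack(data, sizeof(data) - 1, offset);
        return handle.get().type == msgpack::type::POSITIVE_INTEGER ? 0 : 1;
    }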
[1]: https://gist.github.com/azat/027f0e949ea836fc2e6269113ceb8752 clang-tidy report [1]: FAILED: src/CMakeFiles/dbms.dir/Processors/Formats/Impl/MsgPackRowInputFormat.cpp.o /usr/bin/cmake -E __run_co_compile --launcher="prlimit;--as=10000000000;--data=5000000000;--cpu=1000;/usr/bin/ccache" --tidy=/usr/bin/clang-tidy-15 --source=/ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp -- /usr/bin/clang++-15 --target=x86_64-linux-gnu --sysroot=/ch/cmake/linux/../../contrib/sysroot/linux-x86_64/x86_64-linux-gnu/libc -DAWS_SDK_VERSION_MAJOR=1 -DAWS_SDK_VERSION_MINOR=7 -DAWS_SDK_VERSION_PATCH=231 -DBOOST_ASIO_HAS_STD_INVOKE_RESULT=1 -DBOOST_ASIO_STANDALONE=1 -DCARES_STATICLIB -DCONFIGDIR=\"\" -DENABLE_MULTITARGET_CODE=1 -DENABLE_OPENSSL_ENCRYPTION -DHAS_RESERVED_IDENTIFIER -DHAVE_CONFIG_H -DLIBSASL_EXPORTS=1 -DLZ4_DISABLE_DEPRECATE_WARNINGS=1 -DOBSOLETE_CRAM_ATTR=1 -DOBSOLETE_DIGEST_ATTR=1 -DPLUGINDIR=\"\" -DPOCO_ENABLE_CPP11 -DPOCO_HAVE_FD_EPOLL -DPOCO_OS_FAMILY_UNIX -DSASLAUTHD_CONF_FILE_DEFAULT=\"\" -DSNAPPY_CODEC_AVAILABLE -DSTD_EXCEPTION_HAS_STACK_TRACE=1 -DUNALIGNED_OK -DWITH_COVERAGE=0 -DWITH_GZFILEOP -DX86_64 -DZLIB_COMPAT -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -Iincludes/configs -I/ch/src -Isrc -Isrc/Core/include -I/ch/base/glibc-compatibility/memcpy -I/ch/base/base/.. -Ibase/base/.. -I/ch/contrib/cctz/include -I/ch/base/pcg-random/. -I/ch/contrib/miniselect/include -I/ch/contrib/zstd/lib -Icontrib/cyrus-sasl-cmake -I/ch/contrib/lz4/lib -I/ch/src/Common/mysqlxx/. -Icontrib/c-ares -I/ch/contrib/c-ares -I/ch/contrib/c-ares/include -isystem /ch/contrib/libcxx/include -isystem /ch/contrib/libcxxabi/include -isystem /ch/contrib/libunwind/include -isystem /ch/contrib/libdivide/. -isystem /ch/contrib/jemalloc-cmake/include -isystem /ch/contrib/llvm/llvm/include -isystem contrib/llvm/llvm/include -isystem /ch/contrib/abseil-cpp -isystem /ch/contrib/croaring/cpp -isystem /ch/contrib/croaring/include -isystem /ch/contrib/cityhash102/include -isystem /ch/contrib/boost -isystem /ch/contrib/poco/Net/include -isystem /ch/contrib/poco/Foundation/include -isystem /ch/contrib/poco/NetSSL_OpenSSL/include -isystem /ch/contrib/poco/Crypto/include -isystem /ch/contrib/boringssl/include -isystem /ch/contrib/poco/Util/include -isystem /ch/contrib/poco/JSON/include -isystem /ch/contrib/poco/XML/include -isystem /ch/contrib/replxx/include -isystem /ch/contrib/fmtlib-cmake/../fmtlib/include -isystem /ch/contrib/magic_enum/include -isystem /ch/contrib/double-conversion -isystem /ch/contrib/dragonbox/include -isystem /ch/contrib/re2 -isystem contrib/re2-cmake -isystem /ch/contrib/zlib-ng -isystem contrib/zlib-ng-cmake -isystem /ch/contrib/pdqsort -isystem /ch/contrib/xz/src/liblzma/api -isystem /ch/contrib/aws-c-common/include -isystem /ch/contrib/aws-c-event-stream/include -isystem /ch/contrib/aws/aws-cpp-sdk-s3/include -isystem /ch/contrib/aws/aws-cpp-sdk-core/include -isystem contrib/aws-s3-cmake/include -isystem /ch/contrib/snappy -isystem contrib/snappy-cmake -isystem /ch/contrib/msgpack-c/include -isystem /ch/contrib/fast_float/include -isystem /ch/contrib/librdkafka-cmake/include -isystem /ch/contrib/librdkafka/src -isystem contrib/librdkafka-cmake/auxdir -isystem /ch/contrib/cppkafka/include -isystem /ch/contrib/nats-io/src -isystem /ch/contrib/nats-io/src/adapters -isystem /ch/contrib/nats-io/src/include -isystem /ch/contrib/nats-io/src/unix -isystem /ch/contrib/libuv/include -isystem /ch/contrib/krb5/src/include -isystem contrib/krb5-cmake/include -isystem /ch/contrib/NuRaft/include -isystem 
/ch/contrib/poco/MongoDB/include -isystem contrib/mariadb-connector-c-cmake/include-public -isystem /ch/contrib/mariadb-connector-c/include -isystem /ch/contrib/mariadb-connector-c/libmariadb -isystem /ch/contrib/icu/icu4c/source/i18n -isystem /ch/contrib/icu/icu4c/source/common -isystem /ch/contrib/capnproto/c++/src -isystem /ch/contrib/arrow/cpp/src -isystem /ch/contrib/arrow-cmake/cpp/src -isystem contrib/arrow-cmake/cpp/src -isystem contrib/arrow-cmake/../orc/c++/include -isystem /ch/contrib/orc/c++/include -isystem contrib/avro-cmake/include -isystem /ch/contrib/avro/lang/c++/api -isystem /ch/contrib/openldap-cmake/linux_x86_64/include -isystem /ch/contrib/openldap/include -isystem /ch/contrib/sparsehash-c11 -isystem /ch/contrib/protobuf/src -isystem src/Server/grpc_protos -isystem /ch/contrib/grpc/include -isystem /ch/contrib/libhdfs3/include -isystem /ch/contrib/hive-metastore -isystem /ch/contrib/thrift/lib/cpp/src -isystem contrib/thrift-cmake -isystem /ch/contrib/azure/sdk/core/azure-core/inc-isystem /ch/contrib/azure/sdk/identity/azure-identity/inc -isystem /ch/contrib/azure/sdk/storage/azure-storage-common/inc -isystem /ch/contrib/azure/sdk/storage/azure-storage-blobs/inc -isystem /ch/contrib/s2geometry/src -isystem /ch/contrib/AMQP-CPP/include -isystem /ch/contrib/AMQP-CPP -isystem /ch/contrib/sqlite-amalgamation -isystem /ch/contrib/rocksdb/include -isystem /ch/contrib/libpqxx/include -isystem /ch/contrib/libpq -isystem /ch/contrib/libpq/include -isystem /ch/contrib/libstemmer_c/include -isystem /ch/contrib/wordnet-blast -isystem /ch/contrib/lemmagen-c/include -isystem /ch/contrib/simdjson/include -isystem /ch/contrib/rapidjson/include -isystem /ch/contrib/consistent-hashing --gcc-toolchain=/ch/cmake/linux/../../contrib/sysroot/linux-x86_64 -std=c++20 -fdiagnostics-color=always -Xclang -fuse-ctor-homing -fsized-deallocation -UNDEBUG -gdwarf-aranges -pipe -mssse3 -msse4.1 -msse4.2 -mpclmul -mpopcnt -fasynchronous-unwind-tables -falign-functions=32 -mbranches-within-32B-boundaries -fdiagnostics-absolute-paths -fstrict-vtable-pointers -fexperimental-new-pass-manager -Wall -Wextra -Weverything -Wpedantic -Wno-zero -length-array -Wno-c++98-compat-pedantic -Wno-c++98-compat -Wno-c++20-compat -Wno-conversion -Wno-ctad-maybe-unsupported -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-double-promotion -Wno-exit-time-destructors -Wno-float-equal -Wno-global-constructors -Wno-missing-prototypes -Wno-missing-variable-declarations -Wno-padded -Wno-switch-enum -Wno-undefined-func-template -Wno-unused-template -Wno-vla -Wno-weak-template-vtables -Wno-weak-vtables -Wno-thread-safety-negative -g -O0 -g -gdwarf-4 -fno-inline -D_LIBCPP_DEBUG=0 -D OS_LINUX -I/ch/base -I/ch/contrib/magic_enum/include -include /ch/src/Core/iostream_debug_helpers.h -Werror -nostdinc++ -std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Processors/Formats/Impl/MsgPackRowInputFormat.cpp.o -MF src/CMakeFiles/dbms.dir/Processors/Formats/Impl/MsgPackRowInputFormat.cpp.o.d -o src/CMakeFiles/dbms.dir/Processors/Formats/Impl/MsgPackRowInputFormat.cpp.o -c /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp /ch/contrib/msgpack-c/include/msgpack/v1/detail/cpp11_zone.hpp:195:9: error: Attempt to free released memory [clang-analyzer-cplusplus.NewDelete,-warnings-as-errors] ::free(p); ^ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:509:5: note: Taking false branch if (buf.eof()) ^ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:514:24: note: Assuming 'i' is not equal to field 
'number_of_columns' for (size_t i = 0; i != number_of_columns; ++i) ^~~~~~~~~~~~~~~~~~~~~~ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:514:5: note: Loop condition is true. Entering loop body for (size_t i = 0; i != number_of_columns; ++i) ^ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:516:30: note: Calling 'MsgPackSchemaReader::readObject' auto object_handle = readObject(); ^~~~~~~~~~~~ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:426:5: note: Taking false branch if (buf.eof()) ^ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:433:5: note: Loop condition is true. Entering loop body while (need_more_data) ^ /ch/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp:438:29: note: Calling 'unpack' object_handle = msgpack::unpack(buf.position(), buf.buffer().end() - buf.position(), offset); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /ch/contrib/msgpack-c/include/msgpack/v3/unpack.hpp:52:12: note: Calling 'unpack' return msgpack::v3::unpack(data, len, off, referenced, f, user_data, limit); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ /ch/contrib/msgpack-c/include/msgpack/v3/unpack.hpp:35:5: note: Control jumps to the 'default' case at line 40 switch(ret) { ^ /ch/contrib/msgpack-c/include/msgpack/v3/unpack.hpp:41:9: note: Execution continues on line 43 break; ^ /ch/contrib/msgpack-c/include/msgpack/v3/unpack.hpp:43:35: note: Calling '~unique_ptr' return msgpack::object_handle(); ^ /ch/contrib/libcxx/include/__memory/unique_ptr.h:269:19: note: Calling 'unique_ptr::reset' ~unique_ptr() { reset(); } ^~~~~~~ /ch/contrib/libcxx/include/__memory/unique_ptr.h:314:9: note: '__tmp' is non-null if (__tmp) ^~~~~ /ch/contrib/libcxx/include/__memory/unique_ptr.h:314:5: note: Taking true branch if (__tmp) ^ /ch/contrib/libcxx/include/__memory/unique_ptr.h:315:7: note: Calling 'default_delete::operator()' __ptr_.second()(__tmp); ^~~~~~~~~~~~~~~~~~~~~~ /ch/contrib/libcxx/include/__memory/unique_ptr.h:54:5: note: Memory is released delete __ptr; ^~~~~~~~~~~~ /ch/contrib/libcxx/include/__memory/unique_ptr.h:54:5: note: Calling 'zone::operator delete' delete __ptr; ^~~~~~~~~~~~ /ch/contrib/msgpack-c/include/msgpack/v1/detail/cpp11_zone.hpp:195:9: note: Attempt to free released memory ::free(p); ^~~~~~~~~ [1]: https://s3.amazonaws.com/clickhouse-builds/41046/9677898b3b234a5ba0371edaf719ea8890d084ff/binary_tidy/build_log.log Signed-off-by: Azat Khuzhin --- src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index b3d237fecfd..45522aece80 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -2,6 +2,15 @@ #if USE_MSGPACK +/// FIXME: there is some issue with clang-15, that incorrectly detect a +/// "Attempt to free released memory" in msgpack::unpack(), because of delete +/// operator for zone (from msgpack/v1/detail/cpp11_zone.hpp), hence NOLINT +/// +/// NOTE: that I was not able to suppress it locally, only with +/// NOLINTBEGIN/NOLINTEND +// +// NOLINTBEGIN(clang-analyzer-cplusplus.NewDelete) + #include #include #include @@ -551,6 +560,8 @@ void registerMsgPackSchemaReader(FormatFactory & factory) } +// NOLINTEND(clang-analyzer-cplusplus.NewDelete) + #else namespace DB From fb5812f5541a2bad23e127c31b4edc9e39d4fe0c Mon Sep 17 00:00:00 2001 From: Robert Schulze 
Date: Tue, 13 Sep 2022 16:28:19 +0000 Subject: [PATCH 56/74] Bump vectorscan to 5.4.8 Brings good stuff like - https://github.com/VectorCamp/vectorscan/pull/119 - https://github.com/VectorCamp/vectorscan/pull/118 --- contrib/vectorscan | 2 +- contrib/vectorscan-cmake/CMakeLists.txt | 7 +------ contrib/vectorscan-cmake/{x86_64 => amd64}/config.h | 0 3 files changed, 2 insertions(+), 7 deletions(-) rename contrib/vectorscan-cmake/{x86_64 => amd64}/config.h (100%) diff --git a/contrib/vectorscan b/contrib/vectorscan index 73695e419c2..f6250ae3e5a 160000 --- a/contrib/vectorscan +++ b/contrib/vectorscan @@ -1 +1 @@ -Subproject commit 73695e419c27af7fe2a099c7aa57931cc02aea5d +Subproject commit f6250ae3e5a3085000239313ad0689cc1e00cdc2 diff --git a/contrib/vectorscan-cmake/CMakeLists.txt b/contrib/vectorscan-cmake/CMakeLists.txt index 828f2a17df2..a8b9bfa52ab 100644 --- a/contrib/vectorscan-cmake/CMakeLists.txt +++ b/contrib/vectorscan-cmake/CMakeLists.txt @@ -249,11 +249,6 @@ elseif (ARCH_AARCH64) "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/Parser.cpp" "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/control_verbs.cpp" ) - set_source_files_properties( - "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/Parser.cpp" - "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/control_verbs.cpp" - COMPILE_FLAGS -Wno-c++11-narrowing - ) endif() # Platform-dependent files @@ -304,7 +299,7 @@ target_include_directories (_vectorscan SYSTEM PUBLIC "${LIBRARY_DIR}/src") # Please regenerate these files if you update vectorscan. if (ARCH_AMD64) - target_include_directories (_vectorscan PRIVATE x86_64) + target_include_directories (_vectorscan PRIVATE amd64) endif () if (ARCH_AARCH64) diff --git a/contrib/vectorscan-cmake/x86_64/config.h b/contrib/vectorscan-cmake/amd64/config.h similarity index 100% rename from contrib/vectorscan-cmake/x86_64/config.h rename to contrib/vectorscan-cmake/amd64/config.h From 5aedf60689db88e99696da6d612958dc765e8ff7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 13 Sep 2022 20:51:51 +0000 Subject: [PATCH 57/74] fix: repair aarch64 build --- contrib/vectorscan-cmake/CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/vectorscan-cmake/CMakeLists.txt b/contrib/vectorscan-cmake/CMakeLists.txt index a8b9bfa52ab..d6c626c1612 100644 --- a/contrib/vectorscan-cmake/CMakeLists.txt +++ b/contrib/vectorscan-cmake/CMakeLists.txt @@ -249,6 +249,11 @@ elseif (ARCH_AARCH64) "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/Parser.cpp" "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/control_verbs.cpp" ) + set_source_files_properties( + "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/Parser.cpp" + "${LIBRARY_DIR}/../vectorscan-cmake/rageled_files/aarch64/control_verbs.cpp" + COMPILE_FLAGS -Wno-c++11-narrowing + ) endif() # Platform-dependent files From bc111b56805c41a7905d78ea540c610a0e478a0b Mon Sep 17 00:00:00 2001 From: young scott Date: Wed, 14 Sep 2022 09:15:40 +0000 Subject: [PATCH 58/74] fix issue #41096 --- src/Interpreters/DDLWorker.h | 7 ++++++- src/Storages/System/StorageSystemDDLWorkerQueue.cpp | 6 +++--- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 7ddcc80c02a..e3c1fa4c271 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -61,18 +61,23 @@ public: return host_fqdn_id; } + std::string getQueueDir() const + { + return queue_dir; + } + void startup(); virtual void shutdown(); bool
isCurrentlyActive() const { return initialized && !stop_flag; } -protected: /// Returns cached ZooKeeper session (possibly expired). ZooKeeperPtr tryGetZooKeeper() const; /// If necessary, creates a new session and caches it. ZooKeeperPtr getAndSetZooKeeper(); +protected: /// Iterates through queue tasks in ZooKeeper, runs execution of new tasks void scheduleTasks(bool reinitialized); diff --git a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp index 111ea343398..185a6be6a70 100644 --- a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp +++ b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp @@ -205,9 +205,9 @@ static void fillStatusColumns(MutableColumns & res_columns, size_t & col, void StorageSystemDDLWorkerQueue::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { - zkutil::ZooKeeperPtr zookeeper = context->getZooKeeper(); - fs::path ddl_zookeeper_path = context->getConfigRef().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/"); - + auto& ddl_worker = context->getDDLWorker(); + fs::path ddl_zookeeper_path = ddl_worker.getQueueDir(); + zkutil::ZooKeeperPtr zookeeper = ddl_worker.getAndSetZooKeeper(); Strings ddl_task_paths = zookeeper->getChildren(ddl_zookeeper_path); GetResponseFutures ddl_task_futures; From ea31302071803a51201239d8039e13ba78954bfd Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 14 Sep 2022 11:30:06 +0200 Subject: [PATCH 59/74] Fix --- src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp index f18debe8a8b..1853b5b4dd7 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp @@ -131,9 +131,6 @@ DiskObjectStorageMetadata::DiskObjectStorageMetadata( void DiskObjectStorageMetadata::addObject(const String & path, size_t size) { - if (!object_storage_root_path.empty() && path.starts_with(object_storage_root_path)) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected relative path"); - total_size += size; storage_objects.emplace_back(path, size); } From 52db0e5c40cb3137cbdcb9f8c802d1fd60cdc96c Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 14 Sep 2022 11:52:23 +0200 Subject: [PATCH 60/74] Update DiskObjectStorageMetadata.cpp --- src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp index 1853b5b4dd7..56cc20098ba 100644 --- a/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp +++ b/src/Disks/ObjectStorages/DiskObjectStorageMetadata.cpp @@ -13,7 +13,6 @@ namespace DB namespace ErrorCodes { extern const int UNKNOWN_FORMAT; - extern const int LOGICAL_ERROR; } void DiskObjectStorageMetadata::deserialize(ReadBuffer & buf) From 59e7eb084c57be7416d7bc18bf420ccc117580da Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 14 Sep 2022 11:15:10 +0000 Subject: [PATCH 61/74] Add column type check before UUID insertion in MsgPack format --- src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp | 4 +++- .../0_stateless/02422_msgpack_uuid_wrong_column.reference | 0 tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.sql | 4 ++++ 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 
tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.reference create mode 100644 tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.sql diff --git a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp index b3d237fecfd..c9978de3ab2 100644 --- a/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp +++ b/src/Processors/Formats/Impl/MsgPackRowInputFormat.cpp @@ -235,8 +235,10 @@ static void insertNull(IColumn & column, DataTypePtr type) assert_cast(column).insertDefault(); } -static void insertUUID(IColumn & column, DataTypePtr /*type*/, const char * value, size_t size) +static void insertUUID(IColumn & column, DataTypePtr type, const char * value, size_t size) { + if (!isUUID(type)) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Cannot insert MessagePack UUID into column with type {}.", type->getName()); ReadBufferFromMemory buf(value, size); UUID uuid; readBinaryBigEndian(uuid.toUnderType().items[0], buf); diff --git a/tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.reference b/tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.sql b/tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.sql new file mode 100644 index 00000000000..4d790354d51 --- /dev/null +++ b/tests/queries/0_stateless/02422_msgpack_uuid_wrong_column.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel, no-fasttest + +insert into function file(02422_data.msgpack) select toUUID('f4cdd80d-5d15-4bdc-9527-adcca635ec1f') as uuid settings output_format_msgpack_uuid_representation='ext'; +select * from file(02422_data.msgpack, auto, 'x Int32'); -- {serverError ILLEGAL_COLUMN} From c2b02c2ae95d697b63324d9ebe423d903aeb3032 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 14 Sep 2022 18:23:37 +0200 Subject: [PATCH 62/74] Download ccache from release PRs for backports --- tests/ci/build_check.py | 4 +++- tests/ci/ccache_utils.py | 10 +++++++++- tests/ci/fast_test_check.py | 2 +- tests/ci/pr_info.py | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index f58c7a74dfe..d668dbe0498 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -291,7 +291,9 @@ def main(): logging.info("Will try to fetch cache for our build") try: - get_ccache_if_not_exists(ccache_path, s3_helper, pr_info.number, TEMP_PATH) + get_ccache_if_not_exists( + ccache_path, s3_helper, pr_info.number, TEMP_PATH, pr_info.release_pr + ) except Exception as e: # In case there are issues with ccache, remove the path and do not fail a build logging.info("Failed to get ccache, building without it. Error: %s", e) diff --git a/tests/ci/ccache_utils.py b/tests/ci/ccache_utils.py index cfe07363589..864b3a8f9b6 100644 --- a/tests/ci/ccache_utils.py +++ b/tests/ci/ccache_utils.py @@ -11,6 +11,7 @@ import requests # type: ignore from compress_files import decompress_fast, compress_fast from env_helper import S3_DOWNLOAD, S3_BUILDS_BUCKET +from s3_helper import S3Helper DOWNLOAD_RETRIES_COUNT = 5 @@ -57,12 +58,19 @@ def dowload_file_with_progress(url, path): def get_ccache_if_not_exists( - path_to_ccache_dir, s3_helper, current_pr_number, temp_path + path_to_ccache_dir: str, + s3_helper: S3Helper, + current_pr_number: int, + temp_path: str, + release_pr: int, ) -> int: """returns: number of PR for downloaded PR. 
-1 if ccache not found""" ccache_name = os.path.basename(path_to_ccache_dir) cache_found = False prs_to_check = [current_pr_number] + # Release PR is either 0 or defined + if release_pr: + prs_to_check.append(release_pr) ccache_pr = -1 if current_pr_number != 0: prs_to_check.append(0) diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index 038289406de..03e42726808 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -125,7 +125,7 @@ if __name__ == "__main__": logging.info("Will try to fetch cache for our build") ccache_for_pr = get_ccache_if_not_exists( - cache_path, s3_helper, pr_info.number, temp_path + cache_path, s3_helper, pr_info.number, temp_path, pr_info.release_pr ) upload_master_ccache = ccache_for_pr in (-1, 0) diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index 2acd0e4c811..77421ddac32 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -86,7 +86,7 @@ class PRInfo: self.changed_files = set() # type: Set[str] self.body = "" self.diff_urls = [] - self.release_pr = "" + self.release_pr = 0 ref = github_event.get("ref", "refs/head/master") if ref and ref.startswith("refs/heads/"): ref = ref[11:] From 173df9a73a969e46d3d181cf5650b0686a353631 Mon Sep 17 00:00:00 2001 From: Alexander Tokmakov Date: Wed, 14 Sep 2022 20:38:50 +0300 Subject: [PATCH 63/74] Update StorageSystemDDLWorkerQueue.cpp --- src/Storages/System/StorageSystemDDLWorkerQueue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp index 185a6be6a70..67867b6c577 100644 --- a/src/Storages/System/StorageSystemDDLWorkerQueue.cpp +++ b/src/Storages/System/StorageSystemDDLWorkerQueue.cpp @@ -206,7 +206,7 @@ static void fillStatusColumns(MutableColumns & res_columns, size_t & col, void StorageSystemDDLWorkerQueue::fillData(MutableColumns & res_columns, ContextPtr context, const SelectQueryInfo &) const { auto& ddl_worker = context->getDDLWorker(); - fs::path ddl_zookeeper_path = ddl_worker.getQueueDir(); + fs::path ddl_zookeeper_path = ddl_worker.getQueueDir(); zkutil::ZooKeeperPtr zookeeper = ddl_worker.getAndSetZooKeeper(); Strings ddl_task_paths = zookeeper->getChildren(ddl_zookeeper_path); From 559c696230bb64e5dee247cd7aa109f2f4621aaf Mon Sep 17 00:00:00 2001 From: kssenii Date: Wed, 14 Sep 2022 20:29:36 +0200 Subject: [PATCH 64/74] Fix --- .../IO/CachedOnDiskReadBufferFromFile.cpp | 16 ++-- src/Disks/IO/CachedOnDiskReadBufferFromFile.h | 2 +- .../configs/config.d/storage_conf.xml | 21 +++++ tests/integration/test_merge_tree_s3/test.py | 78 ++++++++++++++++++- 4 files changed, 107 insertions(+), 10 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index fa4a79415ec..e268faebc63 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -143,9 +143,9 @@ void CachedOnDiskReadBufferFromFile::initialize(size_t offset, size_t size) } CachedOnDiskReadBufferFromFile::ImplementationBufferPtr -CachedOnDiskReadBufferFromFile::getCacheReadBuffer(size_t offset) const +CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segment) const { - auto path = cache->getPathInLocalCache(cache_key, offset, is_persistent); + auto path = file_segment.getPathInLocalCache(); ReadSettings local_read_settings{settings}; /// Do not allow to use asynchronous version of LocalFSReadMethod. 
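/// The substance of the change above, spelled out: the cache file path was
/// previously recomputed from (cache_key, offset, is_persistent) using the
/// buffer's own is_persistent flag, which can disagree with the flag the
/// segment was created with once the persistence setting flips between
/// restarts; the FileSegment's own state always matches the file on disk.
/// A sketch of the assumed on-disk naming (based on the "%persistent" LIKE
/// filter in the integration test below, not on this patch itself):
static std::string segmentFileName(size_t offset, bool is_persistent)
{
    return std::to_string(offset) + (is_persistent ? "_persistent" : "");
}
/// If the reader derives "42_persistent" while the writer created "42",
/// opening the cache file fails with "No such file or directory".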
@@ -247,7 +247,7 @@ CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & fil if (download_state == FileSegment::State::DOWNLOADED) { read_type = ReadType::CACHED; - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } else { @@ -280,7 +280,7 @@ CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & fil /// file_offset_of_buffer_end read_type = ReadType::CACHED; - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } download_state = file_segment->wait(); @@ -289,7 +289,7 @@ case FileSegment::State::DOWNLOADED: { read_type = ReadType::CACHED; - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } case FileSegment::State::EMPTY: case FileSegment::State::PARTIALLY_DOWNLOADED: @@ -305,7 +305,7 @@ /// file_offset_of_buffer_end read_type = ReadType::CACHED; - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } auto downloader_id = file_segment->getOrSetDownloader(); @@ -323,7 +323,7 @@ read_type = ReadType::CACHED; file_segment->resetDownloader(); - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } if (file_segment->getCurrentWriteOffset() < file_offset_of_buffer_end) @@ -354,7 +354,7 @@ if (canStartFromCache(file_offset_of_buffer_end, *file_segment)) { read_type = ReadType::CACHED; - return getCacheReadBuffer(range.left); + return getCacheReadBuffer(*file_segment); } else { diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h index b86e53ec160..535d01f3a8c 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.h +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.h @@ -68,7 +68,7 @@ private: ImplementationBufferPtr getReadBufferForFileSegment(FileSegmentPtr & file_segment); - ImplementationBufferPtr getCacheReadBuffer(size_t offset) const; + ImplementationBufferPtr getCacheReadBuffer(const FileSegment & file_segment) const; std::optional getLastNonDownloadedOffset() const; diff --git a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml index 3ee49744a61..f3505f53339 100644 --- a/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3/configs/config.d/storage_conf.xml
@@ -38,6 +38,20 @@
 /jbod1/
 1000000000
+        <s3_r>
+            <type>s3</type>
+            <endpoint>http://minio1:9001/root/data/</endpoint>
+            <access_key_id>minio</access_key_id>
+            <secret_access_key>minio123</secret_access_key>
+            <s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>
+        </s3_r>
+        <s3_cache_r>
+            <type>cache</type>
+            <disk>s3_r</disk>
+            <path>/s3_cache_r/</path>
+            <max_size>1000000000</max_size>
+            <do_not_evict_index_and_mark_files>1</do_not_evict_index_and_mark_files>
+        </s3_cache_r>
@@ -78,6 +92,13 @@
+        <s3_cache_r>
+            <volumes>
+                <main>
+                    <disk>s3_cache_r</disk>
+                </main>
+            </volumes>
+        </s3_cache_r>
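The new `s3_cache_r` disk above layers the filesystem cache over the raw `s3_r` object disk, so a read consults the local cache directory first and falls back to S3. A sketch of that layering with hypothetical helper names (`openLocalFile` and `readFromS3AndCache` are illustrative, not functions from this patch):

    std::unique_ptr<ReadBuffer> readThroughCache(const FileSegment & file_segment)
    {
        /// Cache hit: the segment is already materialized under /s3_cache_r/.
        if (file_segment.state() == FileSegment::State::DOWNLOADED)
            return openLocalFile(file_segment.getPathInLocalCache());
        /// Cache miss: stream from the wrapped s3_r disk and write through the cache.
        return readFromS3AndCache(file_segment);
    }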
diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index 9b254b71826..4ce5fd5a069 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -6,7 +6,6 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.utility import generate_values, replace_config, SafeThread - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -36,6 +35,7 @@ def cluster(): "/jbod1:size=2M", ], ) + logging.info("Starting cluster...") cluster.start() logging.info("Cluster started") @@ -742,3 +742,79 @@ def test_store_cleanup_disk_s3(cluster, node_name): "CREATE TABLE s3_test UUID '00000000-1000-4000-8000-000000000001' (n UInt64) Engine=MergeTree() ORDER BY n SETTINGS storage_policy='s3';" ) node.query("INSERT INTO s3_test SELECT 1") + + +@pytest.mark.parametrize("node_name", ["node"]) +def test_cache_setting_compatibility(cluster, node_name): + node = cluster.instances[node_name] + + node.query("DROP TABLE IF EXISTS s3_test NO DELAY") + + node.query( + "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r';" + ) + node.query( + "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 500" + ) + + result = node.query("SYSTEM DROP FILESYSTEM CACHE") + + result = node.query( + "SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'" + ) + assert int(result) == 0 + + node.query("SELECT * FROM s3_test") + + result = node.query( + "SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'" + ) + assert int(result) > 0 + + config_path = os.path.join( + SCRIPT_DIR, + f"./{cluster.instances_dir_name}/node/configs/config.d/storage_conf.xml", + ) + + replace_config( + config_path, + "1", + "0", + ) + + result = node.query("DESCRIBE CACHE 's3_cache_r'") + assert result.strip().endswith("1") + + node.restart_clickhouse() + + result = node.query("DESCRIBE CACHE 's3_cache_r'") + assert result.strip().endswith("0") + + result = node.query( + "SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'" + ) + assert int(result) > 0 + + node.query("SELECT * FROM s3_test FORMAT Null") + + assert not node.contains_in_log("No such file or directory: Cache info:") + + replace_config( + config_path, + "0", + "1", + ) + + result = node.query( + "SELECT count() FROM system.filesystem_cache WHERE cache_path LIKE '%persistent'" + ) + assert int(result) > 0 + + node.restart_clickhouse() + + result = node.query("DESCRIBE CACHE 's3_cache_r'") + assert result.strip().endswith("1") + + node.query("SELECT * FROM s3_test FORMAT Null") + + assert not node.contains_in_log("No such file or directory: Cache info:") From 0b1c2c62fd4cb2f7999589308a5d3fcd37ac5551 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 14 Sep 2022 20:59:45 +0200 Subject: [PATCH 65/74] Update CachedOnDiskReadBufferFromFile.cpp --- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index e268faebc63..5e7f107144f 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -145,6 +145,8 @@ void CachedOnDiskReadBufferFromFile::initialize(size_t offset, size_t size) CachedOnDiskReadBufferFromFile::ImplementationBufferPtr 
CachedOnDiskReadBufferFromFile::getCacheReadBuffer(const FileSegment & file_segment) const { + /// Use is_persistent flag from in-memory state of the filesegment, + /// because it is consistent with what is written on disk. auto path = file_segment.getPathInLocalCache(); ReadSettings local_read_settings{settings}; @@ -237,8 +239,6 @@ bool CachedOnDiskReadBufferFromFile::canStartFromCache(size_t current_offset, co CachedOnDiskReadBufferFromFile::ImplementationBufferPtr CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & file_segment) { - auto range = file_segment->range(); - auto download_state = file_segment->state(); LOG_TEST(log, "getReadBufferForFileSegment: {}", file_segment->getInfoForLog()); From 780c0e9b2f011d9d91a265500ec7d68947d9c577 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Sep 2022 00:18:23 +0200 Subject: [PATCH 66/74] Update CachedOnDiskReadBufferFromFile.cpp --- src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 5e7f107144f..d370a2902a4 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -339,7 +339,7 @@ CachedOnDiskReadBufferFromFile::getReadBufferForFileSegment(FileSegmentPtr & fil LOG_TEST(log, "Predownload. File segment info: {}", file_segment->getInfoForLog()); chassert(file_offset_of_buffer_end > file_segment->getCurrentWriteOffset()); bytes_to_predownload = file_offset_of_buffer_end - file_segment->getCurrentWriteOffset(); - chassert(bytes_to_predownload < range.size()); + chassert(bytes_to_predownload < file_segment->range().size()); } read_type = ReadType::REMOTE_FS_READ_AND_PUT_IN_CACHE; From def4eeac70d98859b6f7f96423148192bfe197ed Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Thu, 15 Sep 2022 12:27:08 +0200 Subject: [PATCH 67/74] Fix perf tests (#41332) We run the left server two times. If the server is not properly stopped after the first run, we will get an `Address already in use: [::]:9001` exception on the second run. --- docker/test/performance-comparison/compare.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/test/performance-comparison/compare.sh b/docker/test/performance-comparison/compare.sh index d3d7084f37f..b0b5ebdb2e2 100755 --- a/docker/test/performance-comparison/compare.sh +++ b/docker/test/performance-comparison/compare.sh @@ -61,7 +61,7 @@ function configure cp -rv right/config left ||: # Start a temporary server to rename the tables - while pkill clickhouse-serv; do echo . ; sleep 1 ; done + while pkill -f clickhouse-serv ; do echo . ; sleep 1 ; done echo all killed set -m # Spawn temporary in its own process groups @@ -88,7 +88,7 @@ function configure clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||: clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||: - while pkill clickhouse-serv; do echo . ; sleep 1 ; done + while pkill -f clickhouse-serv ; do echo . ; sleep 1 ; done echo all killed # Make copies of the original db for both servers. Use hardlinks instead @@ -106,7 +106,7 @@ function configure function restart { - while pkill clickhouse-serv; do echo . ; sleep 1 ; done + while pkill -f clickhouse-serv ; do echo . ; sleep 1 ; done echo all killed # Change the jemalloc settings here.
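# Why -f matters in the loops above (an inference from the commit message, not
# spelled out in the patch): bare pkill matches only the short process name,
# which can be just "clickhouse" when the server is spawned as
# "clickhouse server", so the kill loop could finish while the first server
# still held port 9001; pkill -f matches the whole command line and catches
# both spawn styles.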
@@ -1400,7 +1400,7 @@ case "$stage" in while env kill -- -$watchdog_pid ; do sleep 1; done # Stop the servers to free memory for the subsequent query analysis. - while pkill clickhouse-serv; do echo . ; sleep 1 ; done + while pkill -f clickhouse-serv ; do echo . ; sleep 1 ; done echo Servers stopped. ;& "analyze_queries") From 4935a4bf966b724e0750e677d4a73ed23dfd23f4 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Sep 2022 12:29:22 +0200 Subject: [PATCH 68/74] Update storing-data.md --- docs/en/operations/storing-data.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 194778400d3..6c8901e66c9 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -108,6 +108,7 @@ Example of disk configuration: 1 +
 ```
@@ -134,6 +135,13 @@ Example of configuration for versions later or equal to 22.8:
 10000000
+    <s3_cache>
+        <volumes>
+            <main>
+                <disk>cache</disk>
+            </main>
+        </volumes>
+    </s3_cache>
 ```
@@ -151,6 +159,13 @@ Example of configuration for versions earlier than 22.8:
 10000000
+    <s3_cache>
+        <volumes>
+            <main>
+                <disk>s3</disk>
+            </main>
+        </volumes>
+    </s3_cache>
 ```
From 1ec7ce265becb119740b17ea0758d5133e923265 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 15 Sep 2022 12:30:02 +0200 Subject: [PATCH 69/74] Update storing-data.md --- docs/en/operations/storing-data.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/en/operations/storing-data.md b/docs/en/operations/storing-data.md index 6c8901e66c9..663469ef4ae 100644 --- a/docs/en/operations/storing-data.md +++ b/docs/en/operations/storing-data.md @@ -108,7 +108,6 @@ Example of disk configuration: 1 -
``` From 1b8b2ebed532a2d459f2a80c7bd9d3340de2bbaa Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Thu, 15 Sep 2022 08:41:46 -0300 Subject: [PATCH 70/74] Disable optimize_monotonous_functions_in_order_by by default (#41136) * #40094 disable optimize_monotonous_functions_in_order_by by default * fix 01576_alias_column_rewrite test * fix incorrect 02149_read_in_order_fixed_prefix.sql test * Update 02149_read_in_order_fixed_prefix.sql --- src/Core/Settings.h | 2 +- ...1321_monotonous_functions_in_order_by_bug.reference | 2 ++ .../01321_monotonous_functions_in_order_by_bug.sql | 7 +++++++ .../queries/0_stateless/01576_alias_column_rewrite.sql | 2 +- .../0_stateless/02149_read_in_order_fixed_prefix.sql | 10 ++++++++-- 5 files changed, 19 insertions(+), 4 deletions(-) create mode 100644 tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.reference create mode 100644 tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.sql diff --git a/src/Core/Settings.h b/src/Core/Settings.h index da420079766..8793bbb3011 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -481,7 +481,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value) M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \ M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \ M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \ - M(Bool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \ + M(Bool, optimize_monotonous_functions_in_order_by, false, "Replace monotonous function with its argument in ORDER BY", 0) \ M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 
'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \ M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \ diff --git a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.reference b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.reference new file mode 100644 index 00000000000..0c720206065 --- /dev/null +++ b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.reference @@ -0,0 +1,2 @@ +2020-01-01 01:00:00 1 +2020-01-01 01:00:00 999 diff --git a/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.sql b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.sql new file mode 100644 index 00000000000..4aa52fe6ae8 --- /dev/null +++ b/tests/queries/0_stateless/01321_monotonous_functions_in_order_by_bug.sql @@ -0,0 +1,7 @@ +SELECT + toStartOfHour(c1) AS _c1, + c2 +FROM values((toDateTime('2020-01-01 01:01:01'), 999), (toDateTime('2020-01-01 01:01:59'), 1)) +ORDER BY + _c1 ASC, + c2 ASC diff --git a/tests/queries/0_stateless/01576_alias_column_rewrite.sql b/tests/queries/0_stateless/01576_alias_column_rewrite.sql index 8424eb11f9b..1f28225bef8 100644 --- a/tests/queries/0_stateless/01576_alias_column_rewrite.sql +++ b/tests/queries/0_stateless/01576_alias_column_rewrite.sql @@ -17,7 +17,7 @@ INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-01 12:00:00' INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-02 12:00:00'), 1 FROM numbers(10); INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-03 12:00:00'), 1 FROM numbers(10); -set optimize_respect_aliases = 1; +set optimize_respect_aliases = 1, optimize_monotonous_functions_in_order_by = 1; SELECT 'test-partition-prune'; SELECT COUNT() = 10 FROM test_table WHERE day = '2020-01-01' SETTINGS max_rows_to_read = 10; diff --git a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql index 4dfcbb9bf80..44c1c12be35 100644 --- a/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql +++ b/tests/queries/0_stateless/02149_read_in_order_fixed_prefix.sql @@ -56,7 +56,13 @@ ENGINE = MergeTree ORDER BY (toStartOfDay(dt), d); INSERT INTO t_read_in_order SELECT toDateTime('2020-10-10 00:00:00') + number, 1 / (number % 100 + 1), number FROM numbers(1000); EXPLAIN PIPELINE SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 5; -SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 5; +SELECT * from ( + SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 50000000000 + -- subquery with limit 50000000 to stabilize a test result and prevent order by d pushdown +) order by d limit 5; EXPLAIN PIPELINE SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 5; -SELECT toStartOfDay(dt) as date, d FROM t_read_in_order WHERE date = '2020-10-10' ORDER BY round(d) LIMIT 5; +SELECT * from ( + SELECT toStartOfDay(dt) as date, d FROM t_read_in_order WHERE date = '2020-10-10' ORDER BY round(d) LIMIT 50000000000 + -- subquery with limit 50000000 to stabilize a test result and prevent order by d pushdown +) order by d limit 5; From 761d53b438be4b2031d0acd4559cacabe355ff3d Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 15 Sep 2022 16:14:45 +0000 Subject: [PATCH 71/74] Use merged NuRaft --- 
contrib/NuRaft | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/NuRaft b/contrib/NuRaft index e7f834e87ec..e15858f8ad0 160000 --- a/contrib/NuRaft +++ b/contrib/NuRaft @@ -1 +1 @@ -Subproject commit e7f834e87ec5c82bc111840bc1f934d5866c042d +Subproject commit e15858f8ad0ce8aba85cf74e3763874c76bf927c From 39307f9dba215e13c97b87768cfc58ca6914f4da Mon Sep 17 00:00:00 2001 From: Antonio Andelic Date: Thu, 15 Sep 2022 16:14:53 +0000 Subject: [PATCH 72/74] Extract into function --- src/Coordination/KeeperDispatcher.cpp | 134 +++++++++++++------------- src/Coordination/KeeperDispatcher.h | 2 + 2 files changed, 70 insertions(+), 66 deletions(-) diff --git a/src/Coordination/KeeperDispatcher.cpp b/src/Coordination/KeeperDispatcher.cpp index cf7b82eda13..3445ef5ea23 100644 --- a/src/Coordination/KeeperDispatcher.cpp +++ b/src/Coordination/KeeperDispatcher.cpp @@ -84,70 +84,6 @@ void KeeperDispatcher::requestThread() KeeperStorage::RequestsForSessions quorum_requests; KeeperStorage::RequestsForSessions read_requests; - auto process_read_requests = [&, this](const auto & coordination_settings) mutable - { - if (coordination_settings->read_mode.toString() == "fastlinear") - { - // we just want to know what's the current latest committed log on Leader node - auto leader_info_result = server->getLeaderInfo(); - if (leader_info_result) - { - leader_info_result->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr & exception) mutable - { - if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) - { - addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); - return; - } - else if (result.get_result_code() != nuraft::cmd_result_code::OK) - { - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } - else if (exception) - { - LOG_INFO(log, "Got exception while waiting for read results {}", exception->what()); - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } - - auto & leader_info_ctx = result.get(); - - if (!leader_info_ctx) - { - addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); - return; - } - - KeeperServer::NodeInfo leader_info; - leader_info.term = leader_info_ctx->get_ulong(); - leader_info.last_committed_index = leader_info_ctx->get_ulong(); - std::lock_guard lock(leader_waiter_mutex); - auto node_info = server->getNodeInfo(); - - /// we're behind, we need to wait - if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) - { - auto & leader_waiter = leader_waiters[leader_info]; - leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); - LOG_TRACE(log, "waiting for term {}, idx {}", leader_info.term, leader_info.last_committed_index); - } - /// process it in background thread - else if (!read_requests_queue.push(std::move(requests_for_sessions))) - throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); - }); - } - } - else - { - assert(coordination_settings->read_mode.toString() == "nonlinear"); - if (!read_requests_queue.push(std::move(read_requests))) - throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); - } - - read_requests.clear(); - }; - auto process_quorum_requests = [&, this]() mutable { /// Forcefully process all previous pending requests @@ -205,7 +141,7 @@ void 
KeeperDispatcher::requestThread() if (read_requests.size() > max_batch_size) { - process_read_requests(coordination_settings); + processReadRequests(coordination_settings, read_requests); if (previous_quorum_done()) break; @@ -225,7 +161,7 @@ void KeeperDispatcher::requestThread() /// batch of read requests can send at most one request /// so we don't care if the previous batch hasn't received response if (!read_requests.empty()) - process_read_requests(coordination_settings); + processReadRequests(coordination_settings, read_requests); /// if we still didn't process previous batch we can /// increase are current batch even more @@ -252,6 +188,72 @@ void KeeperDispatcher::requestThread() } } +void KeeperDispatcher::processReadRequests(const CoordinationSettingsPtr & coordination_settings, KeeperStorage::RequestsForSessions & read_requests) +{ + if (coordination_settings->read_mode.toString() == "fastlinear") + { + // we just want to know what's the current latest committed log on Leader node + auto leader_info_result = server->getLeaderInfo(); + if (leader_info_result) + { + leader_info_result->when_ready([&, requests_for_sessions = std::move(read_requests)](nuraft::cmd_result> & result, nuraft::ptr & exception) mutable + { + if (!result.get_accepted() || result.get_result_code() == nuraft::cmd_result_code::TIMEOUT) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZOPERATIONTIMEOUT); + return; + } + + if (result.get_result_code() != nuraft::cmd_result_code::OK) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } + + if (exception) + { + LOG_INFO(log, "Got exception while waiting for read results {}", exception->what()); + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } + + auto & leader_info_ctx = result.get(); + + if (!leader_info_ctx) + { + addErrorResponses(requests_for_sessions, Coordination::Error::ZCONNECTIONLOSS); + return; + } + + KeeperServer::NodeInfo leader_info; + leader_info.term = leader_info_ctx->get_ulong(); + leader_info.last_committed_index = leader_info_ctx->get_ulong(); + std::lock_guard lock(leader_waiter_mutex); + auto node_info = server->getNodeInfo(); + + /// we're behind, we need to wait + if (node_info.term < leader_info.term || node_info.last_committed_index < leader_info.last_committed_index) + { + auto & leader_waiter = leader_waiters[leader_info]; + leader_waiter.insert(leader_waiter.end(), requests_for_sessions.begin(), requests_for_sessions.end()); + LOG_TRACE(log, "waiting for term {}, idx {}", leader_info.term, leader_info.last_committed_index); + } + /// process it in background thread + else if (!read_requests_queue.push(std::move(requests_for_sessions))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); + }); + } + } + else + { + assert(coordination_settings->read_mode.toString() == "nonlinear"); + if (!read_requests_queue.push(std::move(read_requests))) + throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push read requests to queue"); + } + + read_requests.clear(); +} + void KeeperDispatcher::responseThread() { setThreadName("KeeperRspT"); diff --git a/src/Coordination/KeeperDispatcher.h b/src/Coordination/KeeperDispatcher.h index 0ebe67a4f39..6421db87793 100644 --- a/src/Coordination/KeeperDispatcher.h +++ b/src/Coordination/KeeperDispatcher.h @@ -125,6 +125,8 @@ private: void finalizeRequestsThread(); + void processReadRequests(const CoordinationSettingsPtr & coordination_settings, 
From 6dac5097390fd26d69c996e7afbe39246a26af7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?=
Date: Thu, 15 Sep 2022 23:41:15 +0200
Subject: [PATCH 73/74] Speed up reading uniqState (#41089)

* Speed up reading UniquesHashSet
* Improve uniq serialization tests

---
 src/AggregateFunctions/UniquesHashSet.h | 51 +++++++++++++++++-----
 tests/performance/uniq_stored.xml       | 58 +++++++++++++++++++++++++
 2 files changed, 98 insertions(+), 11 deletions(-)
 create mode 100644 tests/performance/uniq_stored.xml

diff --git a/src/AggregateFunctions/UniquesHashSet.h b/src/AggregateFunctions/UniquesHashSet.h
index 8648f6e2500..54503e356c2 100644
--- a/src/AggregateFunctions/UniquesHashSet.h
+++ b/src/AggregateFunctions/UniquesHashSet.h
@@ -424,14 +424,30 @@ public:
 
         alloc(new_size_degree);
 
-        for (size_t i = 0; i < m_size; ++i)
+        if (m_size <= 1)
         {
-            HashValue x = 0;
-            DB::readIntBinary(x, rb);
-            if (x == 0)
-                has_zero = true;
-            else
-                reinsertImpl(x);
+            for (size_t i = 0; i < m_size; ++i)
+            {
+                HashValue x = 0;
+                DB::readIntBinary(x, rb);
+                if (x == 0)
+                    has_zero = true;
+                else
+                    reinsertImpl(x);
+            }
+        }
+        else
+        {
+            auto hs = std::make_unique<HashValue[]>(m_size);
+            rb.readStrict(reinterpret_cast<char *>(hs.get()), m_size * sizeof(HashValue));
+
+            for (size_t i = 0; i < m_size; ++i)
+            {
+                if (hs[i] == 0)
+                    has_zero = true;
+                else
+                    reinsertImpl(hs[i]);
+            }
         }
     }
 
@@ -458,11 +474,24 @@ public:
             resize(new_size_degree);
         }
 
-        for (size_t i = 0; i < rhs_size; ++i)
+        if (rhs_size <= 1)
         {
-            HashValue x = 0;
-            DB::readIntBinary(x, rb);
-            insertHash(x);
+            for (size_t i = 0; i < rhs_size; ++i)
+            {
+                HashValue x = 0;
+                DB::readIntBinary(x, rb);
+                insertHash(x);
+            }
+        }
+        else
+        {
+            auto hs = std::make_unique<HashValue[]>(rhs_size);
+            rb.readStrict(reinterpret_cast<char *>(hs.get()), rhs_size * sizeof(HashValue));
+
+            for (size_t i = 0; i < rhs_size; ++i)
+            {
+                insertHash(hs[i]);
+            }
         }
     }
 
diff --git a/tests/performance/uniq_stored.xml b/tests/performance/uniq_stored.xml
new file mode 100644
index 00000000000..75fb9847aab
--- /dev/null
+++ b/tests/performance/uniq_stored.xml
@@ -0,0 +1,58 @@
+<test>
+    <create_query>
+        create table matview_1
+        (
+            a String,
+            b_count AggregateFunction(uniq, UInt64)
+        ) Engine=MergeTree partition by tuple()
+        ORDER by tuple()
+        SETTINGS index_granularity = 1024;
+    </create_query>
+
+    <create_query>
+        create table matview_10000
+        (
+            a String,
+            b_count AggregateFunction(uniq, String)
+        ) Engine=MergeTree partition by tuple()
+        ORDER by tuple()
+        SETTINGS index_granularity = 1024;
+    </create_query>
+
+    <drop_query>DROP TABLE IF EXISTS matview_1</drop_query>
+    <drop_query>DROP TABLE IF EXISTS matview_10000</drop_query>
+
+    <fill_query>
+        INSERT INTO matview_10000
+        SELECT a, uniqState(b) b_count
+        FROM
+        (
+            SELECT toString(rand() % 1000) a, toString(number % 10000) b
+            FROM numbers_mt(20000000)
+        )
+        GROUP BY a
+        SETTINGS max_insert_threads=8;
+    </fill_query>
+    <fill_query>OPTIMIZE TABLE matview_10000 FINAL</fill_query>
+
+    <fill_query>
+        INSERT INTO matview_1
+        SELECT '1', uniqState(number) b_count
+        FROM
+        (
+            SELECT *
+            FROM numbers_mt(2000000)
+        )
+        GROUP BY number
+        SETTINGS max_insert_threads=8;
+    </fill_query>
+    <fill_query>OPTIMIZE TABLE matview_1 FINAL</fill_query>
+
+    <query>select a, uniqMerge(b_count) as b_count from matview_10000 prewhere a='55' group by a FORMAT Null SETTINGS max_threads=1;</query>
+    <query>select uniqMerge(b_count) as b_count from matview_10000 FORMAT Null SETTINGS max_threads=1;</query>
+
+    <query>select uniqMerge(b_count) as b_count FROM matview_1 FORMAT Null SETTINGS max_threads=1;</query>
+</test>
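[Editor's note] The speedup in this patch comes from replacing one buffered readIntBinary() call per stored element with a single bulk readStrict() into scratch memory, after which all insertion work happens on a plain array. A generic sketch of the same idea, assuming values were written back-to-back in native byte order; Reader stands in for DB::ReadBuffer, and only a readStrict(char *, size_t) method is assumed:

    #include <cstddef>
    #include <memory>

    // Sketch of the bulk-read idea above: one large copy out of the
    // buffered stream instead of one small read per element.
    template <typename HashValue, typename Reader, typename Insert>
    void read_bulk(Reader & rb, size_t count, Insert insert)
    {
        if (count <= 1)
        {
            // Tiny states: per-element reads are fine and avoid an allocation.
            for (size_t i = 0; i < count; ++i)
            {
                HashValue x{};
                rb.readStrict(reinterpret_cast<char *>(&x), sizeof(x));
                insert(x);
            }
            return;
        }

        // One bulk copy, then all further work happens on plain memory.
        auto scratch = std::make_unique<HashValue[]>(count);
        rb.readStrict(reinterpret_cast<char *>(scratch.get()), count * sizeof(HashValue));
        for (size_t i = 0; i < count; ++i)
            insert(scratch[i]);
    }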
From c3ff66bd9d476fb675991886c151f5f42a7e4c01 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?=
Date: Thu, 15 Sep 2022 23:51:38 +0200
Subject: [PATCH 74/74] Implement batch processing for aggregate functions
 with multiple nullable arguments (#41058)

* Implement batch processing for aggregate functions with multiple nullable arguments
* Fix broken perf test
* Improve filter handling in addBatchSinglePlace with nullable arguments
* Fix detecting the Null filter usage

---
 .../AggregateFunctionIf.cpp                 |  65 +++++++++++
 .../AggregateFunctionNull.h                 | 103 ++++++++++++++++++
 tests/performance/avg_weighted.xml          |  16 +++
 .../02417_null_variadic_behaviour.reference |  65 +++++++++++
 .../02417_null_variadic_behaviour.sql       |  41 +++++++
 5 files changed, 290 insertions(+)
 create mode 100644 tests/queries/0_stateless/02417_null_variadic_behaviour.reference
 create mode 100644 tests/queries/0_stateless/02417_null_variadic_behaviour.sql

diff --git a/src/AggregateFunctions/AggregateFunctionIf.cpp b/src/AggregateFunctions/AggregateFunctionIf.cpp
index fa5e6b85a1e..0cf92585b77 100644
--- a/src/AggregateFunctions/AggregateFunctionIf.cpp
+++ b/src/AggregateFunctions/AggregateFunctionIf.cpp
@@ -278,6 +278,71 @@ public:
         }
     }
 
+    void addBatchSinglePlace(
+        size_t row_begin, size_t row_end, AggregateDataPtr __restrict place, const IColumn ** columns, Arena * arena, ssize_t) const final
+    {
+        std::unique_ptr<UInt8[]> final_null_flags = std::make_unique<UInt8[]>(row_end);
+        const size_t filter_column_num = number_of_arguments - 1;
+
+        if (is_nullable[filter_column_num])
+        {
+            const ColumnNullable * nullable_column = assert_cast<const ColumnNullable *>(columns[filter_column_num]);
+            const IColumn & filter_column = nullable_column->getNestedColumn();
+            const UInt8 * filter_null_map = nullable_column->getNullMapColumn().getData().data();
+            const UInt8 * filter_values = assert_cast<const ColumnUInt8 &>(filter_column).getData().data();
+
+            for (size_t i = row_begin; i < row_end; i++)
+            {
+                final_null_flags[i] = (null_is_skipped && filter_null_map[i]) || !filter_values[i];
+            }
+        }
+        else
+        {
+            const IColumn * filter_column = columns[filter_column_num];
+            const UInt8 * filter_values = assert_cast<const ColumnUInt8 *>(filter_column)->getData().data();
+            for (size_t i = row_begin; i < row_end; i++)
+                final_null_flags[i] = !filter_values[i];
+        }
+
+        const IColumn * nested_columns[number_of_arguments];
+        for (size_t arg = 0; arg < number_of_arguments; arg++)
+        {
+            if (is_nullable[arg])
+            {
+                const ColumnNullable & nullable_col = assert_cast<const ColumnNullable &>(*columns[arg]);
+                if (null_is_skipped && (arg != filter_column_num))
+                {
+                    const ColumnUInt8 & nullmap_column = nullable_col.getNullMapColumn();
+                    const UInt8 * col_null_map = nullmap_column.getData().data();
+                    for (size_t r = row_begin; r < row_end; r++)
+                    {
+                        final_null_flags[r] |= col_null_map[r];
+                    }
+                }
+                nested_columns[arg] = &nullable_col.getNestedColumn();
+            }
+            else
+                nested_columns[arg] = columns[arg];
+        }
+
+        bool at_least_one = false;
+        for (size_t i = row_begin; i < row_end; i++)
+        {
+            if (!final_null_flags[i])
+            {
+                at_least_one = true;
+                break;
+            }
+        }
+
+        if (at_least_one)
+        {
+            this->setFlag(place);
+            this->nested_function->addBatchSinglePlaceNotNull(
+                row_begin, row_end, this->nestedPlace(place), nested_columns, final_null_flags.get(), arena, -1);
+        }
+    }
+
 #if USE_EMBEDDED_COMPILER
 
     void compileAdd(llvm::IRBuilderBase & builder, llvm::Value * aggregate_data_ptr, const DataTypes & arguments_types, const std::vector<llvm::Value *> & argument_values) const override
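[Editor's note] What the new addBatchSinglePlace above computes before delegating to the nested function: a single per-row skip mask that is set when the -If condition is false, when the condition itself is NULL (and NULLs are skipped), or when any nullable argument is NULL. A freestanding sketch of that mask construction over plain arrays; build_skip_mask and its parameters are illustrative, not ClickHouse API:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // `cond` is the -If filter, `cond_null` its null map (may be nullptr),
    // `arg_nulls` holds one null map per nullable argument.
    // skip[i] != 0 means "row i does not count".
    std::vector<uint8_t> build_skip_mask(
        size_t rows,
        const uint8_t * cond,
        const uint8_t * cond_null,
        const std::vector<const uint8_t *> & arg_nulls,
        bool null_is_skipped)
    {
        std::vector<uint8_t> skip(rows, 0);

        for (size_t i = 0; i < rows; ++i)
            skip[i] = (null_is_skipped && cond_null && cond_null[i]) || !cond[i];

        // A NULL in any skipped-NULL argument also removes the row.
        if (null_is_skipped)
            for (const uint8_t * null_map : arg_nulls)
                for (size_t i = 0; i < rows; ++i)
                    skip[i] |= null_map[i];

        return skip;
    }

Only if at least one row survives the mask does the combinator set the "seen" flag and call addBatchSinglePlaceNotNull; a fully skipped batch leaves the state untouched, which is what makes an all-NULL input produce NULL.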
diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h
index ca284680800..1e2c9326142 100644
--- a/src/AggregateFunctions/AggregateFunctionNull.h
+++ b/src/AggregateFunctions/AggregateFunctionNull.h
@@ -414,6 +414,109 @@ public:
         this->nested_function->add(this->nestedPlace(place), nested_columns, row_num, arena);
     }
 
+    void addBatchSinglePlace(
+        size_t row_begin,
+        size_t row_end,
+        AggregateDataPtr __restrict place,
+        const IColumn ** columns,
+        Arena * arena,
+        ssize_t if_argument_pos) const final
+    {
+        /// We are going to merge all the flags into a single one to be able to call the nested batching functions
+        std::vector<const UInt8 *> nullable_filters;
+        const IColumn * nested_columns[number_of_arguments];
+
+        std::unique_ptr<UInt8[]> final_flags = nullptr;
+        const UInt8 * final_flags_ptr = nullptr;
+
+        if (if_argument_pos >= 0)
+        {
+            final_flags = std::make_unique<UInt8[]>(row_end);
+            final_flags_ptr = final_flags.get();
+
+            size_t included_elements = 0;
+            const auto & flags = assert_cast<const ColumnUInt8 &>(*columns[if_argument_pos]).getData();
+            for (size_t i = row_begin; i < row_end; i++)
+            {
+                final_flags[i] = !flags.data()[i];
+                included_elements += !!flags.data()[i];
+            }
+
+            if (included_elements == 0)
+                return;
+            if (included_elements != (row_end - row_begin))
+            {
+                nullable_filters.push_back(final_flags_ptr);
+            }
+        }
+
+        for (size_t i = 0; i < number_of_arguments; ++i)
+        {
+            if (is_nullable[i])
+            {
+                const ColumnNullable & nullable_col = assert_cast<const ColumnNullable &>(*columns[i]);
+                nested_columns[i] = &nullable_col.getNestedColumn();
+                if constexpr (null_is_skipped)
+                {
+                    const ColumnUInt8 & nullmap_column = nullable_col.getNullMapColumn();
+                    nullable_filters.push_back(nullmap_column.getData().data());
+                }
+            }
+            else
+            {
+                nested_columns[i] = columns[i];
+            }
+        }
+
+        bool found_one = false;
+
+        chassert(nullable_filters.size() > 0); /// We work under the assumption that we reach this because one argument was NULL
+        if (nullable_filters.size() == 1)
+        {
+            /// We can avoid making copies of the only filter but we still need to check that there is data to be added
+            final_flags_ptr = nullable_filters[0];
+            for (size_t i = row_begin; i < row_end; i++)
+            {
+                if (!final_flags_ptr[i])
+                {
+                    found_one = true;
+                    break;
+                }
+            }
+        }
+        else
+        {
+            if (!final_flags)
+            {
+                final_flags = std::make_unique<UInt8[]>(row_end);
+                final_flags_ptr = final_flags.get();
+            }
+
+            const size_t filter_start = nullable_filters[0] == final_flags_ptr ? 1 : 0;
+            for (size_t filter = filter_start; filter < nullable_filters.size(); filter++)
+            {
+                for (size_t i = row_begin; i < row_end; i++)
+                    final_flags[i] |= nullable_filters[filter][i];
+            }
+
+            for (size_t i = row_begin; i < row_end; i++)
+            {
+                if (!final_flags_ptr[i])
+                {
+                    found_one = true;
+                    break;
+                }
+            }
+        }
+
+        if (!found_one)
+            return; // Nothing to do and nothing to mark
+
+        this->setFlag(place);
+        this->nested_function->addBatchSinglePlaceNotNull(
+            row_begin, row_end, this->nestedPlace(place), nested_columns, final_flags_ptr, arena, -1);
+    }
+
 #if USE_EMBEDDED_COMPILER
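[Editor's note] The Null combinator version above performs the same merging for an arbitrary set of filters: the optional -If mask plus one null map per nullable argument. With exactly one filter it is used in place, with no copy; with several, they are OR-ed into one buffer, reusing the -If buffer as the accumulator when present (that is what the filter_start computation avoids double-counting). A minimal sketch of that fast path, with illustrative names:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    // Given any number of byte masks where non-zero means "skip this row",
    // produce a single mask pointer, copying only when more than one mask
    // is involved.
    const uint8_t * merge_skip_masks(
        const std::vector<const uint8_t *> & masks,
        size_t rows,
        std::unique_ptr<uint8_t[]> & storage)
    {
        if (masks.size() == 1)
            return masks[0]; // fast path: no allocation, no copy

        storage = std::make_unique<uint8_t[]>(rows);
        std::fill_n(storage.get(), rows, 0);
        for (const uint8_t * mask : masks)
            for (size_t i = 0; i < rows; ++i)
                storage[i] |= mask[i];
        return storage.get();
    }

As in the -If variant, the caller then scans the merged mask once; if every row is skipped it returns before setting the "seen" flag, keeping the result NULL.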
diff --git a/tests/performance/avg_weighted.xml b/tests/performance/avg_weighted.xml
index df992ad682a..5aa89b08c35 100644
--- a/tests/performance/avg_weighted.xml
+++ b/tests/performance/avg_weighted.xml
@@ -32,5 +32,21 @@
     <query>SELECT avgWeighted(num_u, num) FROM perf_avg FORMAT Null</query>
     <query>SELECT avgWeighted(num_u, num_u) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeighted(num_f, num_f) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeighted(toNullable(num_f), num_f) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeighted(num_f, toNullable(num_f)) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeighted(toNullable(num_f), toNullable(num_f)) FROM perf_avg FORMAT Null</query>
+
+    <query>SELECT avgWeightedIf(num_f, num_f, num % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(toNullable(num_f), num_f, num % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(num_f, toNullable(num_f), num % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(toNullable(num_f), toNullable(num_f), num % 10) FROM perf_avg FORMAT Null</query>
+
+    <query>SELECT avgWeightedIf(num_f, num_f, toNullable(num) % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(toNullable(num_f), num_f, toNullable(num) % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(num_f, toNullable(num_f), toNullable(num) % 10) FROM perf_avg FORMAT Null</query>
+    <query>SELECT avgWeightedIf(toNullable(num_f), toNullable(num_f), toNullable(num) % 10) FROM perf_avg FORMAT Null</query>
+
+
     <drop_query>DROP TABLE IF EXISTS perf_avg</drop_query>
 </test>
diff --git a/tests/queries/0_stateless/02417_null_variadic_behaviour.reference b/tests/queries/0_stateless/02417_null_variadic_behaviour.reference
new file mode 100644
index 00000000000..bedb69f99b0
--- /dev/null
+++ b/tests/queries/0_stateless/02417_null_variadic_behaviour.reference
@@ -0,0 +1,65 @@
+-- { echo }
+SELECT avgWeighted(number, number) t, toTypeName(t) FROM numbers(1);
+nan Float64
+SELECT avgWeighted(number, number + 1) t, toTypeName(t) FROM numbers(0);
+nan Float64
+SELECT avgWeighted(toNullable(number), number) t, toTypeName(t) FROM numbers(1);
+nan Nullable(Float64)
+SELECT avgWeighted(if(number < 10000, NULL, number), number) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(if(number < 50, NULL, number), number) t, toTypeName(t) FROM numbers(100);
+77.29530201342281 Nullable(Float64)
+SELECT avgWeighted(number, if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(number, if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+77.29530201342281 Nullable(Float64)
+SELECT avgWeighted(toNullable(number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(toNullable(number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+77.29530201342281 Nullable(Float64)
+SELECT avgWeighted(if(number < 10000, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(if(number < 50, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100);
+77.29530201342281 Nullable(Float64)
+SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(if(number < 50, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeighted(if(number < 50, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+77.29530201342281 Nullable(Float64)
+SELECT avgWeightedIf(number, number, number % 10) t, toTypeName(t) FROM numbers(100);
+66.63333333333334 Float64
+SELECT avgWeightedIf(number, number, toNullable(number % 10)) t, toTypeName(t) FROM numbers(100);
+66.63333333333334 Float64
+SELECT avgWeightedIf(number, number, if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+nan Float64
+SELECT avgWeightedIf(number, number, if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+77.75555555555556 Float64
+SELECT avgWeightedIf(number, number, if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+66.63333333333334 Float64
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+77.75555555555556 Nullable(Float64)
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+\N Nullable(Float64)
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+77.75555555555556 Nullable(Float64)
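[Editor's note] The expected numbers in the reference are easy to verify by hand from the skipping semantics above: rows with a NULL value or weight, or a false/NULL condition, simply drop out of both sums of avgWeighted(x, w) = sum(x*w) / sum(w). A small check of two of the constants; the exact printed digits depend on double rounding, hence the approximations in the comments:

    #include <cstdio>

    // Verify two constants from the reference file above:
    //  * avgWeighted over numbers 50..99 weighted by themselves
    //    (rows below 50 are NULL and therefore skipped),
    //  * avgWeightedIf(number, number, number % 10) over 0..99
    //    (rows where number % 10 == 0 fail the condition).
    int main()
    {
        double num = 0, den = 0;
        for (int i = 50; i < 100; ++i)
        {
            num += 1.0 * i * i;
            den += i;
        }
        std::printf("%.14f\n", num / den); // ~ 77.29530201342281

        num = den = 0;
        for (int i = 0; i < 100; ++i)
        {
            if (i % 10 == 0)
                continue;
            num += 1.0 * i * i;
            den += i;
        }
        std::printf("%.14f\n", num / den); // ~ 66.63333333333334
    }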
diff --git a/tests/queries/0_stateless/02417_null_variadic_behaviour.sql b/tests/queries/0_stateless/02417_null_variadic_behaviour.sql
new file mode 100644
index 00000000000..566cf27bb90
--- /dev/null
+++ b/tests/queries/0_stateless/02417_null_variadic_behaviour.sql
@@ -0,0 +1,41 @@
+-- { echo }
+SELECT avgWeighted(number, number) t, toTypeName(t) FROM numbers(1);
+SELECT avgWeighted(number, number + 1) t, toTypeName(t) FROM numbers(0);
+
+SELECT avgWeighted(toNullable(number), number) t, toTypeName(t) FROM numbers(1);
+SELECT avgWeighted(if(number < 10000, NULL, number), number) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 50, NULL, number), number) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeighted(number, if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(number, if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeighted(toNullable(number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(toNullable(number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 10000, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 50, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 50, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeighted(if(number < 50, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeightedIf(number, number, number % 10) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(number, number, toNullable(number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(number, number, if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(number, number, if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(number, number, if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);
+SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100);