diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh
index 0b30ab9dbf7..886efe3ff33 100644
--- a/docker/test/upgrade/run.sh
+++ b/docker/test/upgrade/run.sh
@@ -60,6 +60,12 @@ install_packages previous_release_package_folder
 # available for dump via clickhouse-local
 configure

+# async_replication setting doesn't exist on some older versions
+sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
+    | sed "/<async_replication>1<\/async_replication>/d" \
+    > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+
 # it contains some new settings, but we can safely remove it
 rm /etc/clickhouse-server/config.d/merge_tree.xml
 rm /etc/clickhouse-server/config.d/enable_wait_for_shutdown_replicated_tables.xml
@@ -82,6 +88,12 @@ sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
     > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
 sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml

+# async_replication setting doesn't exist on some older versions
+sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
+    | sed "/<async_replication>1<\/async_replication>/d" \
+    > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
+sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
+
 # But we still need default disk because some tables loaded only into it
 sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
     | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
diff --git a/src/Coordination/Changelog.cpp b/src/Coordination/Changelog.cpp
index 2af68173588..7030775904c 100644
--- a/src/Coordination/Changelog.cpp
+++ b/src/Coordination/Changelog.cpp
@@ -1021,7 +1021,7 @@ void Changelog::writeThread()

     const auto flush_logs = [&](const auto & flush)
     {
-        LOG_TRACE(log, "Flushing {} logs", pending_appends);
+        LOG_TEST(log, "Flushing {} logs", pending_appends);

         {
             std::lock_guard writer_lock(writer_mutex);
diff --git a/src/Coordination/CoordinationSettings.cpp b/src/Coordination/CoordinationSettings.cpp
index aadedf19204..8688f6f5a40 100644
--- a/src/Coordination/CoordinationSettings.cpp
+++ b/src/Coordination/CoordinationSettings.cpp
@@ -22,16 +22,19 @@ void CoordinationSettings::loadFromConfig(const String & config_elem, const Poco
     Poco::Util::AbstractConfiguration::Keys config_keys;
     config.keys(config_elem, config_keys);

-    try
+    for (const String & key : config_keys)
     {
-        for (const String & key : config_keys)
+        try
+        {
             set(key, config.getString(config_elem + "." + key));
-    }
-    catch (Exception & e)
-    {
-        if (e.code() == ErrorCodes::UNKNOWN_SETTING)
-            e.addMessage("in Coordination settings config");
-        throw;
+        }
+        catch (Exception & e)
+        {
+            if (e.code() == ErrorCodes::UNKNOWN_SETTING)
+                LOG_WARNING(&Poco::Logger::get("CoordinationSettings"), "Found unknown coordination setting in config: '{}'", key);
+            else
+                throw;
+        }
     }
 }
@@ -134,6 +137,8 @@ void KeeperConfigurationAndSettings::dump(WriteBufferFromOwnString & buf) const
     write_int(coordination_settings->max_requests_batch_size);
     writeText("max_requests_batch_bytes_size=", buf);
     write_int(coordination_settings->max_requests_batch_bytes_size);
+    writeText("max_flush_batch_size=", buf);
+    write_int(coordination_settings->max_flush_batch_size);
     writeText("max_request_queue_size=", buf);
     write_int(coordination_settings->max_request_queue_size);
     writeText("max_requests_quick_batch_size=", buf);
diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py
index 1e92bdaa788..7ee86d74d35 100644
--- a/tests/integration/helpers/cluster.py
+++ b/tests/integration/helpers/cluster.py
@@ -4260,9 +4260,10 @@ class ClickHouseInstance:
         if len(self.custom_dictionaries_paths):
             write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir)

-        write_embedded_config(
-            "0_common_enable_keeper_async_replication.xml", self.config_d_dir
-        )
+        if self.tag == "latest":
+            write_embedded_config(
+                "0_common_enable_keeper_async_replication.xml", self.config_d_dir
+            )

         logging.debug("Generate and write macros file")
         macros = self.macros.copy()
diff --git a/tests/integration/test_keeper_four_word_command/test.py b/tests/integration/test_keeper_four_word_command/test.py
index 1d5bc6a6541..0de7de8c3be 100644
--- a/tests/integration/test_keeper_four_word_command/test.py
+++ b/tests/integration/test_keeper_four_word_command/test.py
@@ -279,8 +279,9 @@ def test_cmd_conf(started_cluster):
         assert result["stale_log_gap"] == "10000"
         assert result["fresh_log_gap"] == "200"

-        assert result["max_requests_batch_size"] == "100"
+        assert result["max_requests_batch_size"] == "1000"
         assert result["max_requests_batch_bytes_size"] == "102400"
+        assert result["max_flush_batch_size"] == "1000"
         assert result["max_request_queue_size"] == "100000"
         assert result["max_requests_quick_batch_size"] == "100"
         assert result["quorum_reads"] == "false"
@@ -290,6 +291,7 @@
         assert result["compress_snapshots_with_zstd_format"] == "true"
         assert result["configuration_change_tries_count"] == "20"
+        assert result["async_replication"] == "true"

     finally:
         close_keeper_socket(client)