Merge branch 'master' into feature/keeper-dyn-reconf

Antonio Andelic 2023-07-21 14:08:26 +02:00 committed by GitHub
commit 59ad2d9acc
21 changed files with 57 additions and 23 deletions

View File

@@ -353,6 +353,8 @@ bool HedgedConnections::resumePacketReceiver(const HedgedConnections::ReplicaLoc
if (replica_state.packet_receiver->isPacketReady())
{
/// Reset the socket timeout after a packet is received
replica_state.packet_receiver->setTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
last_received_packet = replica_state.packet_receiver->getPacket();
return true;
}
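
The hunk above re-arms the receive timeout every time a packet arrives, so the timeout bounds the silence between packets rather than the duration of the whole exchange. A minimal sketch of the same pattern over a plain Python socket (illustrative only, not ClickHouse code):

import socket

def read_packets(sock: socket.socket, receive_timeout: float):
    """Yield packets; the inactivity timeout is reset after each one."""
    while True:
        sock.settimeout(receive_timeout)  # re-armed after every packet
        try:
            data = sock.recv(4096)
        except socket.timeout:
            raise TimeoutError("no packet within receive_timeout") from None
        if not data:
            return  # peer closed the connection
        yield data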

View File

@@ -353,8 +353,14 @@ MergeTreeData::DataPartPtr Service::findPart(const String & name)
{
/// It is important to include Outdated parts here because remote replicas cannot reliably
/// determine the local state of the part, so queries for the parts in these states are completely normal.
auto part = data.getPartIfExists(
name, {MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated});
MergeTreeData::DataPartPtr part;
/// Ephemeral zero-copy lock may be lost for PreActive parts
bool zero_copy_enabled = data.getSettings()->allow_remote_fs_zero_copy_replication;
if (zero_copy_enabled)
part = data.getPartIfExists(name, {MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated});
else
part = data.getPartIfExists(name, {MergeTreeDataPartState::PreActive, MergeTreeDataPartState::Active, MergeTreeDataPartState::Outdated});
if (part)
return part;
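
When zero-copy replication is enabled, a PreActive part is not served to fetchers because its ephemeral zero-copy lock may already be lost; without zero-copy there is no such lock, so PreActive parts can be served as well. A hypothetical Python sketch of that state filter (names are illustrative):

def allowed_part_states(zero_copy_enabled: bool) -> set:
    """Part states a replica may serve to a remote fetcher."""
    # Outdated is always included: remote replicas cannot reliably know
    # the local state of the part they are asking for.
    states = {"Active", "Outdated"}
    if not zero_copy_enabled:
        # No zero-copy lock to lose, so PreActive parts can be served too.
        states.add("PreActive")
    return states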

View File

@@ -22,7 +22,6 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int POSTGRESQL_REPLICATION_INTERNAL_ERROR;
extern const int BAD_ARGUMENTS;
extern const int TOO_MANY_PARTS;
}
MaterializedPostgreSQLConsumer::MaterializedPostgreSQLConsumer(
@@ -589,13 +588,10 @@ void MaterializedPostgreSQLConsumer::syncTables()
executor.execute();
}
}
catch (DB::Exception & e)
catch (...)
{
if (e.code() == ErrorCodes::TOO_MANY_PARTS)
{
/// Retry this buffer later.
storage_data.buffer.columns = result_rows.mutateColumns();
}
/// Retry this buffer later.
storage_data.buffer.columns = result_rows.mutateColumns();
throw;
}
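
The handler is widened from catching only TOO_MANY_PARTS to catching everything: on any failure the consumed rows are returned to the buffer before the exception is rethrown, so the batch is retried on a later sync. A rough Python sketch of this save-and-rethrow pattern (illustrative, not the real consumer):

def sync_table(buffer, result_rows, execute):
    """Flush result_rows; on any failure keep them for a later retry."""
    try:
        execute(result_rows)
    except Exception:
        # Put the rows back so the next sync attempt can retry the batch.
        buffer.columns = result_rows
        raise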

View File

@@ -1533,6 +1533,7 @@ class ClickHouseCluster:
with_jdbc_bridge=False,
with_hive=False,
with_coredns=False,
allow_analyzer=True,
hostname=None,
env_variables=None,
image="clickhouse/integration-test",
@@ -1630,6 +1631,7 @@ class ClickHouseCluster:
with_hive=with_hive,
with_coredns=with_coredns,
with_cassandra=with_cassandra,
allow_analyzer=allow_analyzer,
server_bin_path=self.server_bin_path,
odbc_bridge_bin_path=self.odbc_bridge_bin_path,
library_bridge_bin_path=self.library_bridge_bin_path,
@@ -3169,6 +3171,7 @@ class ClickHouseInstance:
with_hive,
with_coredns,
with_cassandra,
allow_analyzer,
server_bin_path,
odbc_bridge_bin_path,
library_bridge_bin_path,
@@ -3256,6 +3259,7 @@ class ClickHouseInstance:
self.with_hive = with_hive
self.with_coredns = with_coredns
self.coredns_config_dir = p.abspath(p.join(base_path, "coredns_config"))
self.allow_analyzer = allow_analyzer
self.main_config_name = main_config_name
self.users_config_name = users_config_name
@@ -4245,7 +4249,10 @@ class ClickHouseInstance:
)
write_embedded_config("0_common_instance_users.xml", users_d_dir)
if os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER") is not None:
if (
os.environ.get("CLICKHOUSE_USE_NEW_ANALYZER") is not None
and self.allow_analyzer
):
write_embedded_config("0_common_enable_analyzer.xml", users_d_dir)
if len(self.custom_dictionaries_paths):
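
Taken together, these changes let a single instance opt out of the new analyzer: 0_common_enable_analyzer.xml is written only when CLICKHOUSE_USE_NEW_ANALYZER is set in the environment and the instance was created with allow_analyzer=True (the default). A hypothetical test snippet using the new keyword:

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
# Old server images predate the analyzer config, so they must opt out
# even when CLICKHOUSE_USE_NEW_ANALYZER is set for the whole CI run.
old_node = cluster.add_instance(
    "old_node",
    image="clickhouse/clickhouse-server",
    tag="22.6",
    with_installed_binary=True,
    allow_analyzer=False,
)
new_node = cluster.add_instance("new_node")  # allow_analyzer defaults to True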

View File

@@ -10,11 +10,13 @@ node1 = cluster.add_instance(
tag="19.17.8.54",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node2 = cluster.add_instance(
"node2",
main_configs=["configs/wide_parts_only.xml", "configs/no_compress_marks.xml"],
with_zookeeper=True,
allow_analyzer=False,
)

View File

@@ -9,9 +9,10 @@ node1 = cluster.add_instance(
image="yandex/clickhouse-server",
tag="21.3",
with_installed_binary=True,
allow_analyzer=False,
)
node2 = cluster.add_instance("node2", with_zookeeper=True)
node3 = cluster.add_instance("node3", with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True, allow_analyzer=False)
node3 = cluster.add_instance("node3", with_zookeeper=True, allow_analyzer=False)
@pytest.fixture(scope="module")

View File

@@ -10,6 +10,7 @@ node1 = cluster.add_instance(
tag="19.16.9.37",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node2 = cluster.add_instance(
"node2",
@@ -18,9 +19,10 @@ node2 = cluster.add_instance(
tag="19.16.9.37",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node3 = cluster.add_instance("node3", with_zookeeper=False)
node4 = cluster.add_instance("node4", with_zookeeper=False)
node3 = cluster.add_instance("node3", with_zookeeper=False, allow_analyzer=False)
node4 = cluster.add_instance("node4", with_zookeeper=False, allow_analyzer=False)
@pytest.fixture(scope="module")

View File

@@ -9,6 +9,7 @@ node = cluster.add_instance(
stay_alive=True,
with_zookeeper=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -3,7 +3,7 @@ import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=False)
node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False)
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
@@ -11,6 +11,7 @@ node2 = cluster.add_instance(
tag="21.7.3.14",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
@@ -31,7 +32,7 @@ WITH
quantile(0.05)(cnt) as p05,
quantile(0.95)(cnt) as p95,
p95 - p05 as inter_percentile_range
SELECT
SELECT
sum(cnt) as total_requests,
count() as data_points,
inter_percentile_range
@@ -49,7 +50,7 @@ WITH
quantile(0.05)(cnt) as p05,
quantile(0.95)(cnt) as p95,
p95 - p05 as inter_percentile_range
SELECT
SELECT
sum(cnt) as total_requests,
count() as data_points,
inter_percentile_range

View File

@@ -12,6 +12,7 @@ node = cluster.add_instance(
tag="21.6",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -9,7 +9,7 @@ from helpers.cluster import ClickHouseCluster
from helpers.client import QueryRuntimeException
cluster = ClickHouseCluster(__file__)
upstream = cluster.add_instance("upstream")
upstream = cluster.add_instance("upstream", allow_analyzer=False)
backward = cluster.add_instance(
"backward",
image="clickhouse/clickhouse-server",
@@ -19,6 +19,7 @@ backward = cluster.add_instance(
# Affected at least: singleValueOrNull, last_value, min, max, any, anyLast, anyHeavy, first_value, argMin, argMax
tag="22.6",
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -12,6 +12,7 @@ node = cluster.add_instance(
tag="23.4",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -7,12 +7,13 @@ import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
upstream_node = cluster.add_instance("upstream_node")
upstream_node = cluster.add_instance("upstream_node", allow_analyzer=False)
old_node = cluster.add_instance(
"old_node",
image="clickhouse/clickhouse-server",
tag="22.5.1.2079",
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -10,6 +10,7 @@ node_22_6 = cluster.add_instance(
tag="22.6",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -10,6 +10,7 @@ node1 = cluster.add_instance(
tag="21.1",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node2 = cluster.add_instance(
"node2",
@@ -18,8 +19,9 @@ node2 = cluster.add_instance(
tag="21.1",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node3 = cluster.add_instance("node3", with_zookeeper=False)
node3 = cluster.add_instance("node3", with_zookeeper=False, allow_analyzer=False)
@pytest.fixture(scope="module")

View File

@@ -3,7 +3,7 @@ import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=False)
node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False)
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
@@ -11,6 +11,7 @@ node2 = cluster.add_instance(
tag="21.7.2.7",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -3,7 +3,7 @@ import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=False)
node1 = cluster.add_instance("node1", with_zookeeper=False, allow_analyzer=False)
node2 = cluster.add_instance(
"node2",
with_zookeeper=False,
@@ -11,6 +11,7 @@ node2 = cluster.add_instance(
tag="21.7.2.7",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)

View File

@@ -10,6 +10,7 @@ node1 = cluster.add_instance(
tag="19.16.9.37",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node2 = cluster.add_instance(
"node2",
@@ -18,8 +19,9 @@ node2 = cluster.add_instance(
tag="19.16.9.37",
stay_alive=True,
with_installed_binary=True,
allow_analyzer=False,
)
node3 = cluster.add_instance("node3", with_zookeeper=False)
node3 = cluster.add_instance("node3", with_zookeeper=False, allow_analyzer=False)
@pytest.fixture(scope="module")

View File

@@ -11,12 +11,14 @@ node_old = cluster.add_instance(
stay_alive=True,
with_installed_binary=True,
with_zookeeper=True,
allow_analyzer=False,
)
node_new = cluster.add_instance(
"node2",
main_configs=["configs/no_compress_marks.xml"],
with_zookeeper=True,
stay_alive=True,
allow_analyzer=False,
)

View File

@@ -0,0 +1,3 @@
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=1 format Null;
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=1, use_hedged_requests=0 format Null;
select * from remote('127.2', view(select sleep(3) from system.one)) settings receive_timeout=1, async_socket_for_remote=0, use_hedged_requests=0 format Null;
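
With receive_timeout=1 against a remote side that sleeps for 3 seconds, these queries presumably only finish because intermediate packets keep resetting the socket timeout; the three settings combinations cover the synchronous hedged path fixed above alongside both non-hedged socket modes.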