Set temporary_directories_lifetime in integration tests with MOVE

vdimir 2023-06-02 17:41:16 +00:00
parent db1c03d6db
commit 19bb802b04
GPG Key ID: 6EE4CE2BEDC51862
12 changed files with 17 additions and 9 deletions
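
temporary_directories_lifetime is a MergeTree setting that controls how many seconds tmp_* directories are kept before being cleaned up; the default is 86400 (one day). The tests touched here all exercise part MOVEs, which stage data in temporary (tmp_*) directories on the destination disk, so dropping the lifetime to 1 second presumably keeps stale directories from lingering across test steps. A minimal sketch of the per-table pattern the commit applies — the table and helper names are illustrative, assuming the integration-test framework's usual node.query helper:

def create_move_test_table(node):
    # Hypothetical example table; the commit applies the same SETTINGS
    # tweak to each test's existing CREATE TABLE statement.
    node.query(
        """
        CREATE TABLE move_test (id UInt64, data String)
        ENGINE = MergeTree()
        ORDER BY id
        SETTINGS storage_policy = 'jbod_with_external',
                 temporary_directories_lifetime = 1
        """
    )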

View File

@@ -18,7 +18,7 @@ def initialize_database(nodes, shard):
 CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
 ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
 ORDER BY d PARTITION BY p
-SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
+SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0, temporary_directories_lifetime=1;
 """.format(
 shard=shard, replica=node.name, database=CLICKHOUSE_DATABASE
 )

View File

@@ -96,7 +96,7 @@ def test_part_move(policy, destination_disks):
 data String
 ) ENGINE=MergeTree()
 ORDER BY id
-SETTINGS storage_policy='{}'
+SETTINGS storage_policy='{}', temporary_directories_lifetime=1
 """.format(
 policy
 )

View File

@@ -66,6 +66,7 @@ def create_table(node, table_name, **additional_settings):
 "storage_policy": "blob_storage_policy",
 "old_parts_lifetime": 1,
 "index_granularity": 512,
+"temporary_directories_lifetime": 1,
 }
 settings.update(additional_settings)
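
In this file (and the similar S3 hunk below), the MergeTree settings are accumulated in a Python dict and rendered into the CREATE TABLE statement by the surrounding helper, which the diff does not show. One plausible rendering, assuming the helper joins key=value pairs and relies on repr() to quote string values as SQL literals:

def render_settings_clause(settings):
    # repr() turns "blob_storage_policy" into 'blob_storage_policy',
    # matching ClickHouse string literals, and leaves ints bare, e.g.
    # SETTINGS storage_policy='blob_storage_policy', old_parts_lifetime=1, ...
    return "SETTINGS " + ", ".join(f"{k}={v!r}" for k, v in settings.items())

The helper name is hypothetical; the point is only that every key added to the dict, including temporary_directories_lifetime, ends up in the table's SETTINGS clause.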

View File

@@ -29,7 +29,8 @@ def create_table(cluster, table_name, additional_settings=None):
 SETTINGS
 storage_policy='hdfs',
 old_parts_lifetime=0,
-index_granularity=512
+index_granularity=512,
+temporary_directories_lifetime=1
 """.format(
 table_name
 )

View File

@@ -75,6 +75,7 @@ def create_table(node, table_name, **additional_settings):
 "storage_policy": "s3",
 "old_parts_lifetime": 0,
 "index_granularity": 512,
+"temporary_directories_lifetime": 1,
 }
 settings.update(additional_settings)

View File

@@ -46,7 +46,7 @@ def test_move_partition_to_disk_on_cluster(start_cluster):
 "(x UInt64) "
 "ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_local_table', '{replica}') "
 "ORDER BY tuple()"
-"SETTINGS storage_policy = 'jbod_with_external';",
+"SETTINGS storage_policy = 'jbod_with_external', temporary_directories_lifetime=1;",
 )
 node1.query("INSERT INTO test_local_table VALUES (0)")

View File

@@ -123,4 +123,8 @@
 </storage_configuration>
+<merge_tree>
+<temporary_directories_lifetime>1</temporary_directories_lifetime>
+</merge_tree>
 </clickhouse>

View File

@@ -24,9 +24,10 @@
 </default_with_external>
 </policies>
 </storage_configuration>
 <merge_tree>
 <min_bytes_for_wide_part>0</min_bytes_for_wide_part>
+<temporary_directories_lifetime>1</temporary_directories_lifetime>
 </merge_tree>
 </clickhouse>
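
The two XML hunks above take the server-wide route instead: a value in the <merge_tree> section of a node's config becomes the default for every MergeTree table on that node, with the same effect as the per-table SETTINGS clauses elsewhere in this commit. A test can confirm the effective default via the system.merge_tree_settings table; a sketch assuming the usual node.query helper:

def assert_short_tmp_lifetime(node):
    # system.merge_tree_settings reflects server-level MergeTree defaults,
    # including overrides from the <merge_tree> config section.
    value = node.query(
        "SELECT value FROM system.merge_tree_settings"
        " WHERE name = 'temporary_directories_lifetime'"
    ).strip()
    assert value == "1"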

View File

@@ -128,7 +128,7 @@ def test_hdfs_zero_copy_replication_single_move(cluster, storage_policy, init_ob
 CREATE TABLE single_node_move_test (dt DateTime, id Int64)
 ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/single_node_move_test', '{replica}')
 ORDER BY (dt, id)
-SETTINGS storage_policy='$policy'
+SETTINGS storage_policy='$policy',temporary_directories_lifetime=1
 """
 ).substitute(policy=storage_policy)
 )

View File

@@ -163,7 +163,7 @@ def test_s3_zero_copy_on_hybrid_storage(started_cluster):
 CREATE TABLE hybrid_test ON CLUSTER test_cluster (id UInt32, value String)
 ENGINE=ReplicatedMergeTree('/clickhouse/tables/hybrid_test', '{}')
 ORDER BY id
-SETTINGS storage_policy='hybrid'
+SETTINGS storage_policy='hybrid',temporary_directories_lifetime=1
 """.format(
 "{replica}"
 )

View File

@@ -1549,7 +1549,7 @@ def test_double_move_while_select(started_cluster, name, positive):
 ) ENGINE = MergeTree
 ORDER BY tuple()
 PARTITION BY n
-SETTINGS storage_policy='small_jbod_with_external'
+SETTINGS storage_policy='small_jbod_with_external',temporary_directories_lifetime=1
 """.format(
 name=name
 )

View File

@@ -45,7 +45,7 @@ CREATE TABLE test1 (EventDate Date, CounterID UInt32)
 ENGINE = ReplicatedMergeTree('/clickhouse-tables/test1', 'r1')
 PARTITION BY toMonday(EventDate)
 ORDER BY (CounterID, EventDate)
-SETTINGS index_granularity = 8192, storage_policy = 's3'"""
+SETTINGS index_granularity = 8192, storage_policy = 's3', temporary_directories_lifetime=1"""
 )
 node1.query(