Skip max_partition_size_to_drop check in case of ATTACH PARTITION ... FROM (#30995)
* Skip checkPartitionCanBeDropped check in case of ATTACH PARTITION ... FROM
* Add integration test
* Fix syntax error
* Fix integration test
* Fix integration test
* Fix integration test
* Skip max_partition_size_to_drop for MOVE PARTITION
This commit is contained in:
parent d15fc85c37
commit ad81977ace
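To make the intent concrete, here is a minimal sketch of the behavioral difference this change targets (not part of the commit): it assumes a server whose max_partition_size_to_drop is set very low, as in the test config below, two hypothetical tables dst and src with identical structure and partitioning, and a locally available clickhouse-client CLI driven from Python.

# Illustrative sketch only: shows which ALTER ... PARTITION statements should
# still hit the max_partition_size_to_drop check after this change.
# The tables dst/src and the local clickhouse-client invocation are assumptions.
import subprocess

def run(query):
    # Send one query through the clickhouse-client CLI and return the result.
    return subprocess.run(["clickhouse-client", "--query", query],
                          capture_output=True, text=True)

# ATTACH PARTITION ... FROM copies data into dst without dropping anything,
# so after this commit it is no longer blocked by max_partition_size_to_drop.
print(run("ALTER TABLE dst ATTACH PARTITION 0 FROM src").returncode)

# REPLACE PARTITION ... FROM drops the existing destination partition first,
# so the size check still applies and this is expected to fail once the
# destination partition exceeds the configured limit.
print(run("ALTER TABLE dst REPLACE PARTITION 0 FROM src").returncode)

The same relaxation applies to MOVE PARTITION ... TO TABLE, as the first hunk below shows.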
@@ -3423,7 +3423,6 @@ Pipe MergeTreeData::alterPartition(

                 case PartitionCommand::MoveDestinationType::TABLE:
                 {
-                    checkPartitionCanBeDropped(command.partition);
                     String dest_database = query_context->resolveDatabase(command.to_database);
                     auto dest_storage = DatabaseCatalog::instance().getTable({dest_database, command.to_table}, query_context);
                     movePartitionToTable(dest_storage, command.partition, query_context);
@@ -3445,7 +3444,8 @@ Pipe MergeTreeData::alterPartition(

         case PartitionCommand::REPLACE_PARTITION:
         {
-            checkPartitionCanBeDropped(command.partition);
+            if (command.replace)
+                checkPartitionCanBeDropped(command.partition);
             String from_database = query_context->resolveDatabase(command.from_database);
             auto from_storage = DatabaseCatalog::instance().getTable({from_database, command.from_table}, query_context);
             replacePartitionFrom(from_storage, command.partition, command.replace, query_context);
@@ -0,0 +1,4 @@
<clickhouse>
    <max_table_size_to_drop>1</max_table_size_to_drop>
    <max_partition_size_to_drop>1</max_partition_size_to_drop>
</clickhouse>
@@ -0,0 +1,50 @@
import pytest

from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', main_configs=["configs/config.xml"], with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()

def create_force_drop_flag(node):
    force_drop_flag_path = "/var/lib/clickhouse/flags/force_drop_table"
    node.exec_in_container(["bash", "-c", "touch {} && chmod a=rw {}".format(force_drop_flag_path, force_drop_flag_path)], user="root")

@pytest.mark.parametrize("engine", ['Ordinary', 'Atomic'])
def test_attach_partition_with_large_destination(started_cluster, engine):
    # Initialize
    node.query("CREATE DATABASE db ENGINE={}".format(engine))
    node.query("CREATE TABLE db.destination (n UInt64) ENGINE=ReplicatedMergeTree('/test/destination', 'r1') ORDER BY n PARTITION BY n % 2")
    node.query("CREATE TABLE db.source_1 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_1', 'r1') ORDER BY n PARTITION BY n % 2")
    node.query("INSERT INTO db.source_1 VALUES (1), (2), (3), (4)")
    node.query("CREATE TABLE db.source_2 (n UInt64) ENGINE=ReplicatedMergeTree('/test/source_2', 'r1') ORDER BY n PARTITION BY n % 2")
    node.query("INSERT INTO db.source_2 VALUES (5), (6), (7), (8)")

    # Attach partition when destination partition is empty
    node.query("ALTER TABLE db.destination ATTACH PARTITION 0 FROM db.source_1")
    assert node.query("SELECT n FROM db.destination ORDER BY n") == "2\n4\n"

    # REPLACE PARTITION should still respect max_partition_size_to_drop
    assert node.query_and_get_error("ALTER TABLE db.destination REPLACE PARTITION 0 FROM db.source_2")
    assert node.query("SELECT n FROM db.destination ORDER BY n") == "2\n4\n"

    # Attach partition when destination partition is larger than max_partition_size_to_drop
    node.query("ALTER TABLE db.destination ATTACH PARTITION 0 FROM db.source_2")
    assert node.query("SELECT n FROM db.destination ORDER BY n") == "2\n4\n6\n8\n"

    # Cleanup
    create_force_drop_flag(node)
    node.query("DROP TABLE db.source_1 SYNC")
    create_force_drop_flag(node)
    node.query("DROP TABLE db.source_2 SYNC")
    create_force_drop_flag(node)
    node.query("DROP TABLE db.destination SYNC")
    node.query("DROP DATABASE db")