Always detach parts with wrong partition id.

author Nikolai Kochetov 2021-06-10 15:04:36 +03:00
parent 847dfb9914
commit 2936fdd16c
3 changed files with 35 additions and 2 deletions

src/Common/ErrorCodes.cpp

@@ -554,6 +554,7 @@
     M(584, PROJECTION_NOT_USED) \
     M(585, CANNOT_PARSE_YAML) \
     M(586, CANNOT_CREATE_FILE) \
+    M(587, INVALID_PARTITION_ID) \
     \
     M(998, POSTGRESQL_CONNECTION_FAILURE) \
     M(999, KEEPER_EXCEPTION) \
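
Note: the M(code, NAME) entries above form an X-macro list. Below is a minimal self-contained sketch of that pattern, for context only; it is a generic illustration with a two-entry list, not ClickHouse's exact expansion, and errorCodeToName here is a stand-in name.

#include <cstdio>

// A single list of codes, expanded with different definitions of M.
// (Two entries only; the real list is the long one shown in the hunk above.)
#define APPLY_FOR_ERROR_CODES(M) \
    M(586, CANNOT_CREATE_FILE)   \
    M(587, INVALID_PARTITION_ID)

// First expansion: one integer constant per entry.
#define M(VALUE, NAME) constexpr int NAME = VALUE;
APPLY_FOR_ERROR_CODES(M)
#undef M

// Second expansion: map a code back to its name.
const char * errorCodeToName(int code)
{
#define M(VALUE, NAME) if (code == VALUE) return #NAME;
    APPLY_FOR_ERROR_CODES(M)
#undef M
    return "UNKNOWN";
}

int main()
{
    std::printf("%d -> %s\n", INVALID_PARTITION_ID, errorCodeToName(587));
}

Expanding one list twice keeps the numeric values and the code-to-name table from drifting apart, which is why adding a code is a one-line diff.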

src/Storages/MergeTree/IMergeTreeDataPart.cpp

@@ -46,7 +46,7 @@ namespace ErrorCodes
     extern const int FILE_DOESNT_EXIST;
     extern const int NO_FILE_IN_DATA_PART;
     extern const int EXPECTED_END_OF_FILE;
-    extern const int CORRUPTED_DATA;
+    extern const int INVALID_PARTITION_ID;
     extern const int NOT_FOUND_EXPECTED_DATA_PART;
     extern const int BAD_SIZE_OF_FILE_IN_DATA_PART;
     extern const int BAD_TTL_FILE;
@@ -817,7 +817,7 @@ void IMergeTreeDataPart::loadPartitionAndMinMaxIndex()
         throw Exception(
             "While loading part " + getFullPath() + ": calculated partition ID: " + calculated_partition_id
             + " differs from partition ID in part name: " + info.partition_id,
-            ErrorCodes::CORRUPTED_DATA);
+            ErrorCodes::INVALID_PARTITION_ID);
 }
 
 void IMergeTreeDataPart::loadChecksums(bool require)
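
Note: only the error code changes here. The point, per the commit title and the test below, is that a part whose calculated partition ID disagrees with the one in its name should be detached rather than deleted like an ordinary broken 0-level part; a dedicated code lets the caller tell the two cases apart. A standalone, hypothetical sketch of that dispatch-on-error-code idea follows (names, values, and control flow are illustrative, not ClickHouse's actual loading code):

#include <cstdio>
#include <stdexcept>
#include <string>

// Illustrative codes; CORRUPTED_DATA's real value is not shown in this diff.
constexpr int CORRUPTED_DATA = 84;        // hypothetical value
constexpr int INVALID_PARTITION_ID = 587; // matches the new entry above

// Minimal stand-in for an exception type carrying an error code.
struct CodedException : std::runtime_error
{
    int code;
    CodedException(int code_, const std::string & msg) : std::runtime_error(msg), code(code_) {}
};

// Hypothetical loader: mirrors the check in loadPartitionAndMinMaxIndex, where
// the partition ID recomputed from the part's data must match the one encoded
// in the part's name.
void loadPart(const std::string & id_in_name, const std::string & calculated_id)
{
    if (id_in_name != calculated_id)
        throw CodedException(INVALID_PARTITION_ID,
            "calculated partition ID: " + calculated_id
            + " differs from partition ID in part name: " + id_in_name);
}

int main()
{
    try
    {
        loadPart("aaaa1111", "bbbb2222"); // made-up, mismatching ids
    }
    catch (const CodedException & e)
    {
        // A dedicated code lets the caller always detach such parts,
        // instead of deleting them as it may do for broken 0-level parts.
        if (e.code == INVALID_PARTITION_ID)
            std::printf("detach part: %s\n", e.what());
        else if (e.code == CORRUPTED_DATA)
            std::printf("part is broken: %s\n", e.what());
    }
}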

tests/integration/.../test.py (new file)

@@ -0,0 +1,32 @@
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+
+# Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key.
+node1 = cluster.add_instance('node1', image='yandex/clickhouse-server', tag='21.6.3.14', stay_alive=True, with_installed_binary=True)
+
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_detach_part_wrong_partition_id(start_cluster):
+    # Here we create a table partitioned by UUID.
+    node1.query("create table tab (id UUID, value UInt32) engine = MergeTree PARTITION BY (id) order by tuple()")
+    node1.query("insert into tab values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', 2)")
+
+    # After restart, the partition id will be different.
+    # There is a single 0-level part, which will become broken.
+    # We expect that it will not be removed (as usual for 0-level broken parts),
+    # but moved to /detached.
+    node1.restart_with_latest_version()
+
+    num_detached = node1.query("select count() from system.detached_parts")
+    assert num_detached == '1\n'
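
Note: as a possible manual follow-up (not part of this commit or the test), an operator could check why a part landed in /detached with e.g. select name, reason from system.detached_parts — the same system table the assertion above counts rows in.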