Merge pull request #73274 from ClickHouse/kssenii-patch-14

Fix crash in StorageObjectStorageQueue
Commit authored by Kseniia Sumarokova on 2024-12-17 09:38:57 +00:00, committed via GitHub.
commit 5ba04ea9b7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 62 additions and 6 deletions

View File

@ -204,6 +204,7 @@ StorageObjectStorageQueue::StorageObjectStorageQueue(
storage_metadata.setColumns(columns);
storage_metadata.setConstraints(constraints_);
storage_metadata.setComment(comment);
if (engine_args->settings)
storage_metadata.settings_changes = engine_args->settings->ptr();
setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context_));
setInMemoryMetadata(storage_metadata);
@ -654,6 +655,8 @@ void StorageObjectStorageQueue::alter(
auto table_id = getStorageID();
StorageInMemoryMetadata old_metadata = getInMemoryMetadata();
/// At the moment we cannot do ALTER MODIFY/RESET SETTING if there are no settings changes (exception will be thrown),
/// so we do not need to check if old_metadata.settings_changes == nullptr.
const auto & old_settings = old_metadata.settings_changes->as<const ASTSetQuery &>().changes;
StorageInMemoryMetadata new_metadata = getInMemoryMetadata();

View File

@ -280,6 +280,7 @@ def create_table(
bucket=None,
expect_error=False,
database_name="default",
no_settings=False,
):
auth_params = ",".join(auth)
bucket = started_cluster.minio_bucket if bucket is None else bucket
@ -300,6 +301,12 @@ def create_table(
engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', '{started_cluster.azurite_container}', '{files_path}/', 'CSV')"
node.query(f"DROP TABLE IF EXISTS {table_name}")
if no_settings:
create_query = f"""
CREATE TABLE {database_name}.{table_name} ({format})
ENGINE = {engine_def}
"""
else:
create_query = f"""
CREATE TABLE {database_name}.{table_name} ({format})
ENGINE = {engine_def}
@ -1975,6 +1982,8 @@ def test_commit_on_limit(started_cluster):
def test_upgrade_2(started_cluster):
node = started_cluster.instances["instance_24.5"]
if "24.5" not in node.query("select version()").strip():
node.restart_with_original_version()
table_name = f"test_upgrade_2_{uuid.uuid4().hex[:8]}"
dst_table_name = f"{table_name}_dst"
@ -2527,3 +2536,47 @@ def test_registry(started_cluster):
node1.query(f"DROP TABLE {db_name}.{table_name} SYNC")
assert zk.exists(keeper_path) is None
def test_upgrade_3(started_cluster):
    """Regression test for the StorageObjectStorageQueue crash fix.

    Creates a queue table on the old (24.5) binary WITHOUT a SETTINGS clause,
    verifies it ingests data, then upgrades the server and checks that
    ALTER ... MODIFY SETTING fails with a clear error message instead of
    crashing (the table metadata has no settings_changes to alter).
    """
    node = started_cluster.instances["instance_24.5"]

    # Ensure the node is actually running the original 24.5 binary before
    # creating the table.
    if "24.5" not in node.query("select version()").strip():
        node.restart_with_original_version()

    table_name = f"test_upgrade_3_{uuid.uuid4().hex[:8]}"
    dst_table_name = f"{table_name}_dst"
    keeper_path = f"/clickhouse/test_{table_name}"
    files_path = f"{table_name}_data"
    files_to_generate = 10

    # The crucial part: the table is created with no SETTINGS clause at all
    # (no_settings=True), which is the configuration that used to crash
    # after an upgrade.
    create_table(
        started_cluster, node, table_name, "ordered", files_path, no_settings=True
    )
    total_values = generate_random_files(
        started_cluster, files_path, files_to_generate, start_ind=0, row_num=1
    )
    create_mv(node, table_name, dst_table_name)

    def row_count():
        return int(node.query(f"SELECT count() FROM {dst_table_name}"))

    # Poll (up to ~20s) until the materialized view has consumed every row.
    expected_rows = 10
    for _attempt in range(20):
        if row_count() == expected_rows:
            break
        time.sleep(1)
    assert expected_rows == row_count()

    node.restart_with_latest_version()

    # The table must survive the upgrade, and altering settings on a table
    # that has none must fail gracefully with an explicit error.
    assert table_name in node.query("SHOW TABLES")
    assert (
        "Cannot alter settings, because table engine doesn't support settings changes"
        in node.query_and_get_error(
            f"""
            ALTER TABLE {table_name} MODIFY SETTING processing_threads_num=5
            """
        )
    )