Mirror of https://github.com/ClickHouse/ClickHouse.git
Synced 2024-12-19 04:42:37 +00:00

Merge pull request #73274 from ClickHouse/kssenii-patch-14

Fix crash in StorageObjectStorageQueue

Commit 5ba04ea9b7
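
In short: as the first hunk below shows, the constructor previously called engine_args->settings->ptr() unconditionally, so the crash appears to be a null dereference when a queue table is created without a SETTINGS clause. The fix guards that dereference, documents why alter() may still assume settings_changes is set, and extends the integration tests with a no_settings path in the create_table helper plus an upgrade test that runs ALTER against a settings-less table.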
@@ -204,7 +204,8 @@ StorageObjectStorageQueue::StorageObjectStorageQueue(
     storage_metadata.setColumns(columns);
     storage_metadata.setConstraints(constraints_);
     storage_metadata.setComment(comment);
-    storage_metadata.settings_changes = engine_args->settings->ptr();
+    if (engine_args->settings)
+        storage_metadata.settings_changes = engine_args->settings->ptr();
     setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.columns, context_));
     setInMemoryMetadata(storage_metadata);

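To make the guard concrete, here is a minimal standalone sketch of the pattern. The types and members are simplified stand-ins modeled on the hunk above, not ClickHouse's real definitions:

// Minimal sketch of the null-guard fix; EngineArgs, ASTSetQuery and
// StorageMetadata are hypothetical stand-ins, not ClickHouse's types.
#include <iostream>
#include <memory>

struct ASTSetQuery {};  // stand-in for the parsed SETTINGS clause

struct EngineArgs
{
    // Null when the CREATE TABLE statement carries no SETTINGS clause.
    std::shared_ptr<ASTSetQuery> settings;
};

struct StorageMetadata
{
    std::shared_ptr<ASTSetQuery> settings_changes;
};

int main()
{
    EngineArgs engine_args;            // settings defaults to nullptr
    StorageMetadata storage_metadata;

    // Patched behaviour: only copy the settings AST when it exists.
    // Pre-patch, the equivalent of engine_args.settings->ptr() ran
    // unconditionally and dereferenced the null pointer.
    if (engine_args.settings)
        storage_metadata.settings_changes = engine_args.settings;

    std::cout << (storage_metadata.settings_changes
                      ? "settings stored\n"
                      : "no SETTINGS clause, and no crash\n");
}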
@@ -654,6 +655,8 @@ void StorageObjectStorageQueue::alter(
     auto table_id = getStorageID();

     StorageInMemoryMetadata old_metadata = getInMemoryMetadata();
+    /// At the moment we cannot do ALTER MODIFY/RESET SETTING if there are no settings changes (exception will be thrown),
+    /// so we do not need to check if old_metadata.settings_changes == nullptr.
    const auto & old_settings = old_metadata.settings_changes->as<const ASTSetQuery &>().changes;

     StorageInMemoryMetadata new_metadata = getInMemoryMetadata();
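These comments matter because the constructor fix above means settings_changes can now legitimately be null: the justification for leaving alter() unguarded is that ALTER MODIFY/RESET SETTING already throws earlier when a table has no settings changes, so the pointer is non-null whenever this line runs.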
@@ -280,6 +280,7 @@ def create_table(
     bucket=None,
     expect_error=False,
     database_name="default",
+    no_settings=False,
 ):
     auth_params = ",".join(auth)
     bucket = started_cluster.minio_bucket if bucket is None else bucket
@@ -300,11 +301,17 @@ def create_table(
         engine_def = f"{engine_name}('{started_cluster.env_variables['AZURITE_CONNECTION_STRING']}', '{started_cluster.azurite_container}', '{files_path}/', 'CSV')"

     node.query(f"DROP TABLE IF EXISTS {table_name}")
-    create_query = f"""
-    CREATE TABLE {database_name}.{table_name} ({format})
-    ENGINE = {engine_def}
-    SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}
-    """
+    if no_settings:
+        create_query = f"""
+        CREATE TABLE {database_name}.{table_name} ({format})
+        ENGINE = {engine_def}
+        """
+    else:
+        create_query = f"""
+        CREATE TABLE {database_name}.{table_name} ({format})
+        ENGINE = {engine_def}
+        SETTINGS {",".join((k+"="+repr(v) for k, v in settings.items()))}
+        """

     if expect_error:
         return node.query_and_get_error(create_query)
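With the flag in place, a test can call create_table(started_cluster, node, table_name, "ordered", files_path, no_settings=True) to issue a CREATE TABLE with no SETTINGS clause at all, which is exactly the statement shape the constructor guard above protects.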
@@ -1975,6 +1982,8 @@ def test_commit_on_limit(started_cluster):

 def test_upgrade_2(started_cluster):
     node = started_cluster.instances["instance_24.5"]
+    if "24.5" not in node.query("select version()").strip():
+        node.restart_with_original_version()

     table_name = f"test_upgrade_2_{uuid.uuid4().hex[:8]}"
     dst_table_name = f"{table_name}_dst"
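This guard presumably makes the upgrade tests order-independent: if an earlier test left the instance running the latest binary via restart_with_latest_version(), the check pins it back to its original 24.5 binary before the upgrade scenario starts.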
@@ -2527,3 +2536,47 @@ def test_registry(started_cluster):
     node1.query(f"DROP TABLE {db_name}.{table_name} SYNC")

     assert zk.exists(keeper_path) is None
+
+
+def test_upgrade_3(started_cluster):
+    node = started_cluster.instances["instance_24.5"]
+    if "24.5" not in node.query("select version()").strip():
+        node.restart_with_original_version()
+
+    table_name = f"test_upgrade_3_{uuid.uuid4().hex[:8]}"
+    dst_table_name = f"{table_name}_dst"
+    keeper_path = f"/clickhouse/test_{table_name}"
+    files_path = f"{table_name}_data"
+    files_to_generate = 10
+
+    create_table(
+        started_cluster, node, table_name, "ordered", files_path, no_settings=True
+    )
+    total_values = generate_random_files(
+        started_cluster, files_path, files_to_generate, start_ind=0, row_num=1
+    )
+
+    create_mv(node, table_name, dst_table_name)
+
+    def get_count():
+        return int(node.query(f"SELECT count() FROM {dst_table_name}"))
+
+    expected_rows = 10
+    for _ in range(20):
+        if expected_rows == get_count():
+            break
+        time.sleep(1)
+
+    assert expected_rows == get_count()
+
+    node.restart_with_latest_version()
+    assert table_name in node.query("SHOW TABLES")
+
+    assert (
+        "Cannot alter settings, because table engine doesn't support settings changes"
+        in node.query_and_get_error(
+            f"""
+            ALTER TABLE {table_name} MODIFY SETTING processing_threads_num=5
+            """
+        )
+    )
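test_upgrade_3 exercises the full regression path: on the 24.5 binary it creates an ordered-mode queue table with no SETTINGS clause, verifies the materialized view drains all 10 expected rows, restarts onto the latest version, and then asserts both that the table is still present and that ALTER ... MODIFY SETTING fails with the clean "Cannot alter settings, because table engine doesn't support settings changes" error rather than crashing the server.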