Merge branch 'revert-46909-revert-45911-mutations_rename_hang' of github.com:ClickHouse/ClickHouse into revert-46909-revert-45911-mutations_rename_hang

This commit is contained in:
alesapin 2023-03-02 12:49:06 +01:00
commit 189836307c
5 changed files with 62 additions and 18 deletions

View File

@ -213,6 +213,11 @@ bool DataPartStorageOnDiskBase::isBroken() const
return volume->getDisk()->isBroken();
}
bool DataPartStorageOnDiskBase::isReadonly() const
{
    /// Delegate to the underlying disk: the part storage is read-only
    /// exactly when the disk backing its volume is.
    const auto disk = volume->getDisk();
    return disk->isReadOnly();
}
void DataPartStorageOnDiskBase::syncRevision(UInt64 revision) const
{
volume->getDisk()->syncRevision(revision);

View File

@ -39,6 +39,7 @@ public:
bool supportZeroCopyReplication() const override;
bool supportParallelWrite() const override;
bool isBroken() const override;
bool isReadonly() const override;
void syncRevision(UInt64 revision) const override;
UInt64 getRevision() const override;
std::string getDiskPath() const override;

View File

@ -151,6 +151,7 @@ public:
virtual bool supportZeroCopyReplication() const { return false; }
virtual bool supportParallelWrite() const = 0;
virtual bool isBroken() const = 0;
virtual bool isReadonly() const = 0;
/// TODO: remove or at least remove const.
virtual void syncRevision(UInt64 revision) const = 0;

View File

@ -1304,6 +1304,8 @@ void IMergeTreeDataPart::loadColumns(bool require)
metadata_snapshot = metadata_snapshot->projections.get(name).metadata;
NamesAndTypesList loaded_columns;
bool is_readonly_storage = getDataPartStorage().isReadonly();
if (!metadata_manager->exists("columns.txt"))
{
/// We can get list of columns only from columns.txt in compact parts.
@ -1319,7 +1321,8 @@ void IMergeTreeDataPart::loadColumns(bool require)
if (columns.empty())
throw Exception(ErrorCodes::NO_FILE_IN_DATA_PART, "No columns in part {}", name);
writeColumns(loaded_columns, {});
if (!is_readonly_storage)
writeColumns(loaded_columns, {});
}
else
{
@ -1353,10 +1356,13 @@ void IMergeTreeDataPart::loadColumns(bool require)
{
loaded_metadata_version = metadata_snapshot->getMetadataVersion();
writeMetadata(METADATA_VERSION_FILE_NAME, {}, [loaded_metadata_version](auto & buffer)
if (!is_readonly_storage)
{
writeIntText(loaded_metadata_version, buffer);
});
writeMetadata(METADATA_VERSION_FILE_NAME, {}, [loaded_metadata_version](auto & buffer)
{
writeIntText(loaded_metadata_version, buffer);
});
}
}
setColumns(loaded_columns, infos, loaded_metadata_version);

View File

@ -21,23 +21,31 @@ def cluster():
cluster.add_instance(
"node3", main_configs=["configs/storage_conf_web.xml"], with_nginx=True
)
cluster.add_instance(
"node4",
main_configs=["configs/storage_conf.xml"],
with_nginx=True,
stay_alive=True,
with_installed_binary=True,
image="clickhouse/clickhouse-server",
tag="22.8.14.53",
)
cluster.start()
node1 = cluster.instances["node1"]
expected = ""
global uuids
for i in range(3):
node1.query(
def create_table_and_upload_data(node, i):
node.query(
f"CREATE TABLE data{i} (id Int32) ENGINE = MergeTree() ORDER BY id SETTINGS storage_policy = 'def', min_bytes_for_wide_part=1;"
)
for _ in range(10):
node1.query(
node.query(
f"INSERT INTO data{i} SELECT number FROM numbers(500000 * {i+1})"
)
expected = node1.query(f"SELECT * FROM data{i} ORDER BY id")
node.query(f"SELECT * FROM data{i} ORDER BY id")
metadata_path = node1.query(
metadata_path = node.query(
f"SELECT data_paths FROM system.tables WHERE name='data{i}'"
)
metadata_path = metadata_path[
@ -45,7 +53,7 @@ def cluster():
]
print(f"Metadata: {metadata_path}")
node1.exec_in_container(
node.exec_in_container(
[
"bash",
"-c",
@ -56,8 +64,20 @@ def cluster():
user="root",
)
parts = metadata_path.split("/")
uuids.append(parts[3])
print(f"UUID: {parts[3]}")
return parts[3]
node1 = cluster.instances["node1"]
global uuids
for i in range(2):
uuid = create_table_and_upload_data(node1, i)
uuids.append(uuid)
node4 = cluster.instances["node4"]
uuid = create_table_and_upload_data(node4, 2)
uuids.append(uuid)
yield cluster
@ -68,6 +88,7 @@ def cluster():
@pytest.mark.parametrize("node_name", ["node2"])
def test_usage(cluster, node_name):
node1 = cluster.instances["node1"]
node4 = cluster.instances["node4"]
node2 = cluster.instances[node_name]
global uuids
assert len(uuids) == 3
@ -90,7 +111,11 @@ def test_usage(cluster, node_name):
result = node2.query(
"SELECT id FROM test{} WHERE id % 56 = 3 ORDER BY id".format(i)
)
assert result == node1.query(
node = node1
if i == 2:
node = node4
assert result == node.query(
"SELECT id FROM data{} WHERE id % 56 = 3 ORDER BY id".format(i)
)
@ -99,7 +124,7 @@ def test_usage(cluster, node_name):
i
)
)
assert result == node1.query(
assert result == node.query(
"SELECT id FROM data{} WHERE id > 789999 AND id < 999999 ORDER BY id".format(
i
)
@ -141,6 +166,7 @@ def test_incorrect_usage(cluster):
@pytest.mark.parametrize("node_name", ["node2"])
def test_cache(cluster, node_name):
node1 = cluster.instances["node1"]
node4 = cluster.instances["node4"]
node2 = cluster.instances[node_name]
global uuids
assert len(uuids) == 3
@ -178,7 +204,12 @@ def test_cache(cluster, node_name):
result = node2.query(
"SELECT id FROM test{} WHERE id % 56 = 3 ORDER BY id".format(i)
)
assert result == node1.query(
node = node1
if i == 2:
node = node4
assert result == node.query(
"SELECT id FROM data{} WHERE id % 56 = 3 ORDER BY id".format(i)
)
@ -187,7 +218,7 @@ def test_cache(cluster, node_name):
i
)
)
assert result == node1.query(
assert result == node.query(
"SELECT id FROM data{} WHERE id > 789999 AND id < 999999 ORDER BY id".format(
i
)