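"""Integration tests for MergeTree tables stored on an HDFS-backed disk.

Each test creates a table that uses the 'hdfs' storage policy, runs DML/DDL
against it, and asserts both on query results and on the number of objects
left under the /clickhouse directory in HDFS.
"""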
import logging
import os
import time

import pytest
from pyhdfs import HdfsClient

from helpers.cluster import ClickHouseCluster, is_arm
from helpers.utility import generate_values
from helpers.wait_for_helpers import (
    wait_for_delete_empty_parts,
    wait_for_delete_inactive_parts,
)

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(
    SCRIPT_DIR, "./_instances/node/configs/config.d/storage_conf.xml"
)


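# Skip the whole module on ARM: the HDFS setup these tests depend on is assumed
# to be unavailable on ARM runners.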
if is_arm():
    pytestmark = pytest.mark.skip


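# Helper that creates a MergeTree table on the 'hdfs' storage policy.
# old_parts_lifetime=0 and temporary_directories_lifetime=1 make removed parts
# and temporary directories disappear quickly, so the HDFS object counts
# asserted below stay deterministic.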
def create_table(cluster, table_name, additional_settings=None):
    node = cluster.instances["node"]

    create_table_statement = """
        CREATE TABLE {} (
            dt Date, id Int64, data String,
            INDEX min_max (id) TYPE minmax GRANULARITY 3
        ) ENGINE=MergeTree()
        PARTITION BY dt
        ORDER BY (dt, id)
        SETTINGS
            storage_policy='hdfs',
            old_parts_lifetime=0,
            index_granularity=512,
            temporary_directories_lifetime=1
        """.format(
        table_name
    )

    if additional_settings:
        create_table_statement += ","
        create_table_statement += additional_settings

    node.query(create_table_statement)


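# Expected numbers of objects in HDFS. FILES_OVERHEAD is table-level metadata
# (presumably format_version.txt); the per-part constants approximate the files
# a wide or compact part produces (data and mark files per column, indexes,
# checksums, codec and metadata-version files). Only the totals matter to the
# assertions below.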
FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2  # Data and mark files
FILES_OVERHEAD_DEFAULT_COMPRESSION_CODEC = 1
FILES_OVERHEAD_METADATA_VERSION = 1
FILES_OVERHEAD_PER_PART_WIDE = (
    FILES_OVERHEAD_PER_COLUMN * 3
    + 2
    + 6
    + FILES_OVERHEAD_DEFAULT_COMPRESSION_CODEC
    + FILES_OVERHEAD_METADATA_VERSION
)
FILES_OVERHEAD_PER_PART_COMPACT = (
    10 + FILES_OVERHEAD_DEFAULT_COMPRESSION_CODEC + FILES_OVERHEAD_METADATA_VERSION
)


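# Module-scoped cluster: a single ClickHouse instance with the HDFS storage
# configuration plus a co-started HDFS service.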
@pytest.fixture(scope="module")
def cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance(
            "node", main_configs=["configs/config.d/storage_conf.xml"], with_hdfs=True
        )
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        fs = HdfsClient(hosts=cluster.hdfs_ip)
        fs.mkdirs("/clickhouse")

        logging.info("Created HDFS directory")

        yield cluster
    finally:
        cluster.shutdown()


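# Object removal in HDFS happens asynchronously, so poll the /clickhouse
# listing (once per second, up to num_tries) until it matches the expected
# count, then assert.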
def wait_for_delete_hdfs_objects(cluster, expected, num_tries=30):
    fs = HdfsClient(hosts=cluster.hdfs_ip)
    while num_tries > 0:
        num_hdfs_objects = len(fs.listdir("/clickhouse"))
        if num_hdfs_objects == expected:
            break
        num_tries -= 1
        time.sleep(1)
    assert len(fs.listdir("/clickhouse")) == expected


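# Autouse fixture: before each test, drop any leftover hdfs_test table and wait
# for HDFS to become empty; if objects survive, delete them manually so one
# failing test does not cascade into the next.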
@pytest.fixture(autouse=True)
def drop_table(cluster):
    node = cluster.instances["node"]

    fs = HdfsClient(hosts=cluster.hdfs_ip)
    hdfs_objects = fs.listdir("/clickhouse")
    print("Number of hdfs objects to delete:", len(hdfs_objects), sep=" ")

    node.query("DROP TABLE IF EXISTS hdfs_test SYNC")

    try:
        wait_for_delete_hdfs_objects(cluster, 0)
    finally:
        hdfs_objects = fs.listdir("/clickhouse")
        if len(hdfs_objects) == 0:
            return
        print(
            "Manually removing extra objects to prevent cascading test failures: ",
            hdfs_objects,
        )
        for path in hdfs_objects:
            fs.delete(path)


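# Parametrized over part format: min_rows_for_wide_part=0 forces wide parts,
# while 8192 keeps the 4096-row inserts in compact parts, so both per-part file
# counts are exercised.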
@pytest.mark.parametrize(
    "min_rows_for_wide_part,files_per_part",
    [(0, FILES_OVERHEAD_PER_PART_WIDE), (8192, FILES_OVERHEAD_PER_PART_COMPACT)],
)
def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
    create_table(
        cluster,
        "hdfs_test",
        additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part),
    )

    node = cluster.instances["node"]

    values1 = generate_values("2020-01-03", 4096)
    node.query("INSERT INTO hdfs_test VALUES {}".format(values1))
    assert (
        node.query("SELECT * FROM hdfs_test ORDER BY dt, id FORMAT Values") == values1
    )

    fs = HdfsClient(hosts=cluster.hdfs_ip)

    hdfs_objects = fs.listdir("/clickhouse")
    print(hdfs_objects)
    assert len(hdfs_objects) == FILES_OVERHEAD + files_per_part

    values2 = generate_values("2020-01-04", 4096)
    node.query("INSERT INTO hdfs_test VALUES {}".format(values2))
    assert (
        node.query("SELECT * FROM hdfs_test ORDER BY dt, id FORMAT Values")
        == values1 + "," + values2
    )

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + files_per_part * 2

    assert (
        node.query("SELECT count(*) FROM hdfs_test WHERE id = 1 FORMAT Values") == "(2)"
    )


def test_alter_table_columns(cluster):
    create_table(cluster, "hdfs_test")

    node = cluster.instances["node"]
    fs = HdfsClient(hosts=cluster.hdfs_ip)

    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(
            generate_values("2020-01-03", 4096, -1)
        )
    )

    node.query("ALTER TABLE hdfs_test ADD COLUMN col1 UInt64 DEFAULT 1")
    # To ensure parts have merged
    node.query("OPTIMIZE TABLE hdfs_test")

    assert node.query("SELECT sum(col1) FROM hdfs_test FORMAT Values") == "(8192)"
    assert (
        node.query("SELECT sum(col1) FROM hdfs_test WHERE id > 0 FORMAT Values")
        == "(4096)"
    )
    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN,
    )

    node.query(
        "ALTER TABLE hdfs_test MODIFY COLUMN col1 String",
        settings={"mutations_sync": 2},
    )

    assert node.query("SELECT distinct(col1) FROM hdfs_test FORMAT Values") == "('1')"
    # plus one mutation file
    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1,
    )

    node.query("ALTER TABLE hdfs_test DROP COLUMN col1", settings={"mutations_sync": 2})

    # plus two mutation files
    wait_for_delete_hdfs_objects(
        cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2
    )


def test_attach_detach_partition(cluster):
    create_table(cluster, "hdfs_test")

    node = cluster.instances["node"]
    fs = HdfsClient(hosts=cluster.hdfs_ip)

    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096))
    )
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2

    node.query("ALTER TABLE hdfs_test DETACH PARTITION '2020-01-03'")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)"
    wait_for_delete_empty_parts(node, "hdfs_test")
    wait_for_delete_inactive_parts(node, "hdfs_test")
    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD
        + FILES_OVERHEAD_PER_PART_WIDE * 2
        - FILES_OVERHEAD_METADATA_VERSION,
    )

    node.query("ALTER TABLE hdfs_test ATTACH PARTITION '2020-01-03'")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2

    node.query("ALTER TABLE hdfs_test DROP PARTITION '2020-01-03'")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)"
    wait_for_delete_empty_parts(node, "hdfs_test")
    wait_for_delete_inactive_parts(node, "hdfs_test")
    wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE)

    node.query("ALTER TABLE hdfs_test DETACH PARTITION '2020-01-04'")
    node.query(
        "ALTER TABLE hdfs_test DROP DETACHED PARTITION '2020-01-04'",
        settings={"allow_drop_detached": 1},
    )
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(0)"
    wait_for_delete_empty_parts(node, "hdfs_test")
    wait_for_delete_inactive_parts(node, "hdfs_test")
    wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD)


def test_move_partition_to_another_disk(cluster):
    create_table(cluster, "hdfs_test")

    node = cluster.instances["node"]
    fs = HdfsClient(hosts=cluster.hdfs_ip)

    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096))
    )
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2

    node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE

    node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-04' TO DISK 'hdfs'")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2


def test_table_manipulations(cluster):
    create_table(cluster, "hdfs_test")

    node = cluster.instances["node"]
    fs = HdfsClient(hosts=cluster.hdfs_ip)

    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096))
    )

    node.query("RENAME TABLE hdfs_test TO hdfs_renamed")
    assert node.query("SELECT count(*) FROM hdfs_renamed FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2

    node.query("RENAME TABLE hdfs_renamed TO hdfs_test")
    assert node.query("CHECK TABLE hdfs_test FORMAT Values") == "(1)"

    node.query("DETACH TABLE hdfs_test")
    node.query("ATTACH TABLE hdfs_test")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2

    node.query("TRUNCATE TABLE hdfs_test")
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(0)"
    wait_for_delete_empty_parts(node, "hdfs_test")
    wait_for_delete_inactive_parts(node, "hdfs_test")
    wait_for_delete_hdfs_objects(cluster, FILES_OVERHEAD)


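# Exercises MOVE PARTITION TO TABLE and REPLACE PARTITION FROM across two
# tables on the same HDFS disk, checking that object counts shrink only after
# outdated parts are cleaned up.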
def test_move_replace_partition_to_another_table(cluster):
    create_table(cluster, "hdfs_test")

    node = cluster.instances["node"]
    fs = HdfsClient(hosts=cluster.hdfs_ip)

    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-03", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-04", 4096))
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(
            generate_values("2020-01-05", 4096, -1)
        )
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(
            generate_values("2020-01-06", 4096, -1)
        )
    )
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)"

    hdfs_objects = fs.listdir("/clickhouse")
    assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4

    create_table(cluster, "hdfs_clone")

    node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-03' TO TABLE hdfs_clone")
    node.query("ALTER TABLE hdfs_test MOVE PARTITION '2020-01-05' TO TABLE hdfs_clone")
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)"
    assert node.query("SELECT sum(id) FROM hdfs_clone FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_clone FORMAT Values") == "(8192)"

    # Number of objects in HDFS should be unchanged.
    hdfs_objects = fs.listdir("/clickhouse")
    for obj in hdfs_objects:
        print("Object in HDFS after move", obj)
    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD * 2
        + FILES_OVERHEAD_PER_PART_WIDE * 4
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

    # Add new partitions to the source table with different values, then replace them from the cloned table.
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(
            generate_values("2020-01-03", 4096, -1)
        )
    )
    node.query(
        "INSERT INTO hdfs_test VALUES {}".format(generate_values("2020-01-05", 4096))
    )
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)"

    hdfs_objects = fs.listdir("/clickhouse")
    for obj in hdfs_objects:
        print("Object in HDFS after insert", obj)

    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD * 2
        + FILES_OVERHEAD_PER_PART_WIDE * 6
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

    node.query("ALTER TABLE hdfs_test REPLACE PARTITION '2020-01-03' FROM hdfs_clone")
    node.query("ALTER TABLE hdfs_test REPLACE PARTITION '2020-01-05' FROM hdfs_clone")
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)"
    assert node.query("SELECT sum(id) FROM hdfs_clone FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_clone FORMAT Values") == "(8192)"

    # Wait for outdated partitions deletion.
    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD * 2
        + FILES_OVERHEAD_PER_PART_WIDE * 4
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

    node.query("DROP TABLE hdfs_clone SYNC")
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)"

    # Data should remain in HDFS.
    hdfs_objects = fs.listdir("/clickhouse")

    for obj in hdfs_objects:
        print("Object in HDFS after drop", obj)

    wait_for_delete_hdfs_objects(
        cluster,
        FILES_OVERHEAD
        + FILES_OVERHEAD_PER_PART_WIDE * 4
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )