Remove a test for in-memory parts

This commit is contained in:
Alexey Milovidov 2023-05-05 23:29:48 +02:00
parent 09f171822e
commit 290bf0ae58

View File

@ -489,187 +489,6 @@ def test_polymorphic_parts_non_adaptive(start_cluster):
)
def test_in_memory(start_cluster):
    """Insert parts of several sizes with merges stopped, verify the expected
    mix of InMemory/Compact/Wide parts on both replicas, then re-enable merges,
    OPTIMIZE FINAL, and verify everything collapsed into a single Wide part."""
    replicas = (node9, node10)
    part_types_query = (
        "SELECT part_type, count() FROM system.parts "
        "WHERE table = 'in_memory_table' AND active GROUP BY part_type ORDER BY part_type"
    )

    # Keep every inserted block as its own part.
    for replica in replicas:
        replica.query("SYSTEM STOP MERGES")

    for rows in (200, 200, 300, 600):
        insert_random_data("in_memory_table", node9, rows)
    node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20)

    # Both replicas must see all 1300 rows and the same part-type breakdown.
    for replica in replicas:
        assert replica.query("SELECT count() FROM in_memory_table") == "1300\n"

    expected = "Compact\t1\nInMemory\t2\nWide\t1\n"
    for replica in replicas:
        assert TSV(replica.query(part_types_query)) == TSV(expected)

    for replica in replicas:
        replica.query("SYSTEM START MERGES")

    # OPTIMIZE may race with background merges; retry until it succeeds.
    assert_eq_with_retry(
        node9,
        "OPTIMIZE TABLE in_memory_table FINAL SETTINGS optimize_throw_if_noop = 1",
        "",
    )
    node10.query("SYSTEM SYNC REPLICA in_memory_table", timeout=20)

    # After the final merge there is exactly one Wide part on each replica.
    for replica in replicas:
        assert replica.query("SELECT count() FROM in_memory_table") == "1300\n"
    for replica in replicas:
        assert TSV(replica.query(part_types_query)) == TSV("Wide\t1\n")
def test_in_memory_wal_rotate(start_cluster):
    """Force one WAL file per part, merge everything, and verify that the
    stale per-part WAL files are cleaned up after a restart while a fresh,
    empty wal.bin is created."""

    def check_in_container(shell_condition):
        # Runs a `test ...` expression inside node11's container; raises on failure.
        node11.exec_in_container(["bash", "-c", shell_condition])

    # Write every part to single wal
    node11.query(
        "ALTER TABLE restore_table MODIFY SETTING write_ahead_log_max_bytes = 10"
    )
    for _ in range(5):
        insert_random_data("restore_table", node11, 50)

    # Each insert produced its own WAL segment on disk.
    for part_idx in range(5):
        check_in_container(
            "test -f /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin".format(
                part_idx
            )
        )

    merge_settings = (
        "ALTER TABLE restore_table MODIFY SETTING number_of_free_entries_in_pool_to_lower_max_size_of_merge = 0",
        "ALTER TABLE restore_table MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 10000000",
    )
    for replica in (node11, node12):
        for settings_query in merge_settings:
            replica.query(settings_query)

    # OPTIMIZE may race with background merges; retry until it succeeds.
    assert_eq_with_retry(
        node11,
        "OPTIMIZE TABLE restore_table FINAL SETTINGS optimize_throw_if_noop = 1",
        "",
    )

    # Restart to be sure that the clearing-stale-logs task was run.
    node11.restart_clickhouse(kill=True)

    # The per-part WAL segments must be gone after cleanup.
    for part_idx in range(5):
        check_in_container(
            "test ! -e /var/lib/clickhouse/data/default/restore_table/wal_{0}_{0}.bin".format(
                part_idx
            )
        )

    # A new WAL file was created, exists, and is empty (ready for new parts).
    check_in_container(
        "test -f /var/lib/clickhouse/data/default/restore_table/wal.bin"
    )
    check_in_container(
        "test ! -s /var/lib/clickhouse/data/default/restore_table/wal.bin"
    )
def test_in_memory_deduplication(start_cluster):
    """Insert the same block repeatedly on both replicas and verify replicated
    deduplication keeps exactly one copy of the row."""
    replicas = (node9, node10)

    for _ in range(3):
        # table can be in readonly mode, so retry the insert
        for replica in replicas:
            exec_query_with_retry(
                replica,
                "INSERT INTO deduplication_table (date, id, s) VALUES (toDate('2020-03-03'), 1, 'foo')",
            )

    for replica in replicas:
        replica.query("SYSTEM SYNC REPLICA deduplication_table", timeout=20)

    # Six identical inserts must deduplicate down to a single row everywhere.
    for replica in replicas:
        assert (
            replica.query("SELECT date, id, s FROM deduplication_table")
            == "2020-03-03\t1\tfoo\n"
        )
# Checks that restoring from WAL works after table schema changed
def test_in_memory_alters(start_cluster):
    """Alter the schema between restarts and verify that in-memory parts are
    restored from the WAL correctly under the new schema."""

    def check_parts_type(parts_num):
        # Every active part of 'alters_table' must still be InMemory.
        observed = node9.query(
            "SELECT part_type, count() FROM system.parts WHERE table = 'alters_table' \
            AND active GROUP BY part_type"
        )
        assert observed == "InMemory\t{}\n".format(parts_num)

    node9.query(
        "INSERT INTO alters_table (date, id, s) VALUES (toDate('2020-10-10'), 1, 'ab'), (toDate('2020-10-10'), 2, 'cd')"
    )
    node9.query("ALTER TABLE alters_table ADD COLUMN col1 UInt32")
    node9.restart_clickhouse(kill=True)

    # The added column is filled with its default (0) for pre-existing rows.
    assert (
        node9.query("SELECT id, s, col1 FROM alters_table ORDER BY id")
        == "1\tab\t0\n2\tcd\t0\n"
    )
    check_parts_type(1)

    node9.query(
        "INSERT INTO alters_table (date, id, col1) VALUES (toDate('2020-10-10'), 3, 100)"
    )
    node9.query("ALTER TABLE alters_table MODIFY COLUMN col1 String")
    node9.query("ALTER TABLE alters_table DROP COLUMN s")
    node9.restart_clickhouse(kill=True)
    check_parts_type(2)

    # Column 's' was dropped, so selecting it must now fail.
    with pytest.raises(Exception):
        node9.query("SELECT id, s, col1 FROM alters_table")

    # Values of col1 were not materialized as integers, so they have
    # default string values after the alter.
    assert (
        node9.query("SELECT id, col1 || '_foo' FROM alters_table ORDER BY id")
        == "1\t_foo\n2\t_foo\n3\t100_foo\n"
    )
def test_polymorphic_parts_index(start_cluster):
node1.query(
"CREATE DATABASE test_index ENGINE=Ordinary",