Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-18 04:12:19 +00:00)

Commit ab8db8a239 (parent 4cfcc18a7f): Try to make test_dir.tar smaller
@@ -69,3 +69,6 @@ def test_limited_fetches(started_cluster):

assert max([len(parts) for parts in fetches_result]) == 3, "Strange, but we don't utilize max concurrent threads for fetches"
assert(max(background_fetches_metric)) == 3, "Just checking metric consistent with table"

node1.query("DROP TABLE IF EXISTS t SYNC")
node2.query("DROP TABLE IF EXISTS t SYNC")
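The two DROP statements added here use SYNC, which makes ClickHouse wait until the table is actually removed instead of letting the drop finish in the background; a leftover replicated table can otherwise collide with the next test. A minimal sketch of the same cleanup as a reusable helper (the helper name and the nodes list are illustrative, not part of the commit):

def drop_table_everywhere(nodes, table="t"):
    # DROP ... SYNC blocks until the table's data is gone, so the next test
    # can recreate the same table / ZooKeeper path without racing the async drop.
    for node in nodes:
        node.query(f"DROP TABLE IF EXISTS {table} SYNC")

# usage in this test would be: drop_table_everywhere([node1, node2])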
@@ -287,6 +287,8 @@ def test_query_parser(start_cluster):
        "ALTER TABLE table_with_normal_policy MODIFY SETTING storage_policy='moving_jbod_with_external'")
finally:
    node1.query("DROP TABLE IF EXISTS table_with_normal_policy SYNC")
    node1.query("DROP TABLE IF EXISTS table_with_absent_policy SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
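Most hunks in this commit add the same finally: teardown: drop the test table and system.part_log with SYNC. Purely for illustration, the same cleanup could be centralized in a pytest fixture; a sketch assuming the module-level node1 instance these tests already use (the fixture name is made up):

import pytest

@pytest.fixture
def tables_to_drop():
    tables = []
    yield tables          # a test appends the names of tables it creates
    for table in tables:  # runs after the test body, like the finally: blocks above
        node1.query(f"DROP TABLE IF EXISTS {table} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")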
@@ -328,6 +330,7 @@ def test_alter_policy(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def get_random_string(length):
@@ -397,6 +400,7 @@ def test_round_robin(start_cluster, name, engine):
    assert used_disks[2] == used_disks[0]
finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -424,6 +428,7 @@ def test_max_data_part_size(start_cluster, name, engine):
    assert used_disks[0] == 'external'
finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
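The @pytest.mark.parametrize("name,engine", [...]) decorators visible in the context lines run each test once per (table name, engine) pair, which is why the teardown drops the table by the name parameter. A sketch of the shape of such a parameter list (the concrete names and engines here are illustrative, not copied from the suite):

@pytest.mark.parametrize("name,engine", [
    ("mt_test_table", "MergeTree()"),
    ("replicated_mt_test_table", "ReplicatedMergeTree('/clickhouse/replicated_mt_test_table', '1')"),
])
def test_example(start_cluster, name, engine):
    pass  # each parametrized case creates and later drops a table called `name`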
@@ -477,6 +482,8 @@ def test_jbod_overflow(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -527,6 +534,7 @@ def test_background_move(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -614,6 +622,7 @@ def test_start_stop_moves(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def get_path_for_part_from_part_log(node, table, part_name):
@@ -702,7 +711,7 @@ def test_alter_move(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")

@pytest.mark.parametrize("volume_or_disk", [
    "DISK",
@@ -751,6 +760,7 @@ def test_alter_move_half_of_partition(start_cluster, volume_or_disk):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("volume_or_disk", [
@@ -795,6 +805,7 @@ def test_alter_double_move_partition(start_cluster, volume_or_disk):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def produce_alter_move(node, name):
@@ -879,6 +890,7 @@ def test_concurrent_alter_move(start_cluster, name, engine):
    assert node1.query("SELECT COUNT() FROM {}".format(name)) == "500\n"
finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
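The assert above compares against "500\n" because node.query() in these integration tests returns the server's raw TSV output, newline included. An alternative, shown only as an illustration (the helper name is made up), is to normalize before comparing:

def count_rows(node, table):
    # strip the trailing newline from the TSV response and compare as integers
    return int(node.query(f"SELECT count() FROM {table}").strip())

# e.g.: assert count_rows(node1, name) == 500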
@@ -932,6 +944,7 @@ def test_concurrent_alter_move_and_drop(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -963,6 +976,7 @@ def test_detach_attach(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -1009,6 +1023,7 @@ def test_mutate_to_another_disk(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


@pytest.mark.parametrize("name,engine", [
@@ -1067,6 +1082,7 @@ def test_concurrent_alter_modify(start_cluster, name, engine):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_simple_replication_and_moves(start_cluster):
@@ -1134,6 +1150,7 @@ def test_simple_replication_and_moves(start_cluster):
finally:
    for node in [node1, node2]:
        node.query("DROP TABLE IF EXISTS replicated_table_for_moves SYNC")
        node.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_download_appropriate_disk(start_cluster):
@@ -1168,6 +1185,7 @@ def test_download_appropriate_disk(start_cluster):
finally:
    for node in [node1, node2]:
        node.query("DROP TABLE IF EXISTS replicated_table_for_download SYNC")
        node.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_rename(start_cluster):
@@ -1207,6 +1225,7 @@ def test_rename(start_cluster):
node1.query("DROP TABLE IF EXISTS default.renaming_table SYNC")
node1.query("DROP TABLE IF EXISTS default.renaming_table1 SYNC")
node1.query("DROP TABLE IF EXISTS test.renaming_table2 SYNC")
node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_freeze(start_cluster):
@@ -1242,6 +1261,7 @@ def test_freeze(start_cluster):
finally:
    node1.query("DROP TABLE IF EXISTS default.freezing_table SYNC")
    node1.exec_in_container(["rm", "-rf", "/jbod1/shadow", "/external/shadow"])
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_kill_while_insert(start_cluster):
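The rm -rf of /jbod1/shadow and /external/shadow is needed because ALTER TABLE ... FREEZE hard-links the current parts into a shadow/ directory on every disk holding data for the table, and those directories would otherwise survive the DROP. A sketch of the kind of freeze call whose leftovers this teardown removes (the backup name is illustrative):

# FREEZE creates hard links under <disk_path>/shadow/<name>/ on each disk
# (here the jbod1 and external disks of the storage policy).
node1.query("ALTER TABLE default.freezing_table FREEZE WITH NAME 'backup1'")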
@@ -1285,6 +1305,7 @@ def test_kill_while_insert(start_cluster):
finally:
    try:
        node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
        node1.query("DROP TABLE IF EXISTS system.part_log SYNC")
    except:
        """ClickHouse may be inactive at this moment and we don't want to mask a meaningful exception."""
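Here the cleanup itself is wrapped in try/except: this test may have killed the server, and a failing DROP in finally: would otherwise mask the exception that made the test fail. A roughly equivalent sketch using contextlib.suppress, shown only as an alternative formulation (name is the parametrized table name from the test):

import contextlib

with contextlib.suppress(Exception):
    # ignore errors from cleanup if ClickHouse is down, so the original
    # exception from the test body is the one that propagates
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")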
@@ -1346,6 +1367,7 @@ def test_move_while_merge(start_cluster):

finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_move_across_policies_does_not_work(start_cluster):
@@ -1388,6 +1410,7 @@ def test_move_across_policies_does_not_work(start_cluster):
finally:
    node1.query(f"DROP TABLE IF EXISTS {name} SYNC")
    node1.query(f"DROP TABLE IF EXISTS {name}2 SYNC")
    node1.query("DROP TABLE IF EXISTS system.part_log SYNC")


def _insert_merge_execute(node, name, policy, parts, cmds, parts_before_cmds, parts_after_cmds):
@@ -1487,6 +1510,7 @@ def test_no_merges_in_configuration_allow_from_query_without_reload(start_cluster):

finally:
    node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
    node.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster):
@@ -1506,6 +1530,7 @@ def test_no_merges_in_configuration_allow_from_query_with_reload(start_cluster):

finally:
    node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
    node.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster):
@@ -1525,6 +1550,7 @@ def test_yes_merges_in_configuration_disallow_from_query_without_reload(start_cluster):

finally:
    node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
    node.query("DROP TABLE IF EXISTS system.part_log SYNC")


def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_cluster):
@@ -1545,3 +1571,4 @@ def test_yes_merges_in_configuration_disallow_from_query_with_reload(start_cluster):

finally:
    node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))
    node.query("DROP TABLE IF EXISTS system.part_log SYNC")
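The last four hunks touch tests that toggle merges per volume: their finally: blocks restore whatever state the storage configuration declares for the external volume, SYSTEM STOP MERGES where the config disables merges and SYSTEM START MERGES where it enables them. A minimal sketch of the pairing (the policy name is borrowed from elsewhere in this file purely for illustration):

policy = "moving_jbod_with_external"
try:
    node1.query("SYSTEM STOP MERGES ON VOLUME {}.external".format(policy))
    ...  # scenario that must run without background merges on the external volume
finally:
    # re-enable merges even if the scenario fails, so later tests see the default state
    node1.query("SYSTEM START MERGES ON VOLUME {}.external".format(policy))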
@@ -91,3 +91,6 @@ def test_system_replicated_fetches(started_cluster):
for elem in fetches_result:
    assert elem['elapsed'] >= prev_elapsed, "Elapsed time decreasing prev {}, next {}? It's a bug".format(prev_elapsed, elem['elapsed'])
    prev_elapsed = elem['elapsed']

node1.query("DROP TABLE IF EXISTS t SYNC")
node2.query("DROP TABLE IF EXISTS t SYNC")
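The loop above checks that elapsed never decreases between consecutive snapshots of system.replicated_fetches taken while a fetch is in flight. A hedged sketch of how one such snapshot can be read as dicts (the helper name is illustrative; the real test may select specific columns):

import json

def get_fetches(node):
    # one JSON object per in-flight fetch; fields include 'elapsed', 'table', 'result_part_name'
    raw = node.query("SELECT * FROM system.replicated_fetches FORMAT JSONEachRow")
    return [json.loads(line) for line in raw.splitlines() if line]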