From cb673630263adde5d9b4aa7eee9be3d6b7e95a2d Mon Sep 17 00:00:00 2001
From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com>
Date: Fri, 11 Jun 2021 11:06:37 +0300
Subject: [PATCH 1/2] fix test_partition

---
 tests/integration/test_partition/test.py | 127 +++++++++++------------
 1 file changed, 62 insertions(+), 65 deletions(-)

diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py
index b0a414d737a..baac5367c00 100644
--- a/tests/integration/test_partition/test.py
+++ b/tests/integration/test_partition/test.py
@@ -1,6 +1,6 @@
 import pytest
-
-from helpers.cluster import ClickHouseCluster, subprocess_check_call
+import logging
+from helpers.cluster import ClickHouseCluster
 from helpers.test_tools import TSV
 
 cluster = ClickHouseCluster(__file__)
@@ -23,107 +23,104 @@ def started_cluster():
 
 @pytest.fixture
 def partition_table_simple(started_cluster):
-    q("DROP TABLE IF EXISTS test.partition")
-    q("CREATE TABLE test.partition (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) "
+    q("DROP TABLE IF EXISTS test.partition_simple")
+    q("CREATE TABLE test.partition_simple (date MATERIALIZED toDate(0), x UInt64, sample_key MATERIALIZED intHash64(x)) "
       "ENGINE=MergeTree PARTITION BY date SAMPLE BY sample_key ORDER BY (date,x,sample_key) "
       "SETTINGS index_granularity=8192, index_granularity_bytes=0")
-    q("INSERT INTO test.partition ( x ) VALUES ( now() )")
-    q("INSERT INTO test.partition ( x ) VALUES ( now()+1 )")
+    q("INSERT INTO test.partition_simple ( x ) VALUES ( now() )")
+    q("INSERT INTO test.partition_simple ( x ) VALUES ( now()+1 )")
 
     yield
 
-    q('DROP TABLE test.partition')
+    q('DROP TABLE test.partition_simple')
 
 
 def test_partition_simple(partition_table_simple):
-    q("ALTER TABLE test.partition DETACH PARTITION 197001")
-    q("ALTER TABLE test.partition ATTACH PARTITION 197001")
-    q("OPTIMIZE TABLE test.partition")
+    q("ALTER TABLE test.partition_simple DETACH PARTITION 197001")
+    q("ALTER TABLE test.partition_simple ATTACH PARTITION 197001")
+    q("OPTIMIZE TABLE test.partition_simple")
 
 
 def partition_complex_assert_columns_txt():
-    path_to_parts = path_to_data + 'data/test/partition/'
-    parts = TSV(q("SELECT name FROM system.parts WHERE database='test' AND table='partition'"))
+    path_to_parts = path_to_data + 'data/test/partition_complex/'
+    parts = TSV(q("SELECT name FROM system.parts WHERE database='test' AND table='partition_complex'"))
+    assert len(parts) > 0
     for part_name in parts.lines:
         path_to_columns = path_to_parts + part_name + '/columns.txt'
         # 2 header lines + 3 columns
-        assert subprocess_check_call('cat {} | wc -l'.format(path_to_columns)) == '5\n'
+        assert instance.exec_in_container(['wc', '-l', path_to_columns]).split()[0] == '5'
 
 
 def partition_complex_assert_checksums():
-    # Do `cd` for consistent output for reference
     # Do not check increment.txt - it can be changed by other tests with FREEZE
-    cmd = 'cd ' + path_to_data + " && find shadow -type f -exec md5sum {} \\;" \
-          " | grep partition" \
-          " | sed 's!shadow/[0-9]*/data/[a-z0-9_-]*/!shadow/1/data/test/!g'" \
-          " | sort" \
-          " | uniq"
+    cmd = ["bash", "-c", f"cd {path_to_data} && find shadow -type f -exec"
+        " md5sum {} \\; | grep partition_complex" \
+        " | sed 's shadow/[0-9]*/data/[a-z0-9_-]*/ shadow/1/data/test/ g' | sort | uniq"]
 
-    checksums = "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700102_2_2_0/k.bin\n" \
-                "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition/19700201_1_1_0/v1.bin\n" \
-                "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition/19700102_2_2_0/minmax_p.idx\n" \
-                "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition/19700102_2_2_0/partition.dat\n" \
-                "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition/19700201_1_1_0/partition.dat\n" \
-                "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition/19700102_2_2_0/checksums.txt\n" \
-                "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition/19700102_2_2_0/v1.bin\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/k.mrk\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/p.mrk\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700102_2_2_0/v1.mrk\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/k.mrk\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/p.mrk\n" \
-                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition/19700201_1_1_0/v1.mrk\n" \
-                "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition/19700201_1_1_0/primary.idx\n" \
-                "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition/19700201_1_1_0/checksums.txt\n" \
-                "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700102_2_2_0/columns.txt\n" \
-                "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition/19700201_1_1_0/columns.txt\n" \
-                "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition/19700201_1_1_0/p.bin\n" \
-                "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition/19700102_2_2_0/primary.idx\n" \
-                "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition/19700102_2_2_0/default_compression_codec.txt\n" \
-                "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition/19700201_1_1_0/default_compression_codec.txt\n" \
-                "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700102_2_2_0/count.txt\n" \
-                "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition/19700201_1_1_0/count.txt\n" \
-                "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition/19700102_2_2_0/p.bin\n" \
-                "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition/19700201_1_1_0/k.bin\n" \
-                "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition/19700201_1_1_0/minmax_p.idx\n"
+    checksums = "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" \
+                "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" \
+                "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" \
+                "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" \
+                "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" \
+                "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" \
+                "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" \
+                "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" \
+                "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" \
+                "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" \
+                "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" \
+                "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" \
+                "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" \
+                "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" \
+                "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" \
+                "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" \
+                "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" \
+                "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" \
+                "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" \
+                "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" \
+                "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n"
 
-    assert TSV(subprocess_check_call(cmd).replace('  ', '\t')) == TSV(checksums)
+    assert TSV(instance.exec_in_container(cmd).replace('  ', '\t')) == TSV(checksums)
 
 
 @pytest.fixture
 def partition_table_complex(started_cluster):
-    q("DROP TABLE IF EXISTS test.partition")
-    q("CREATE TABLE test.partition (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) "
+    q("DROP TABLE IF EXISTS test.partition_complex")
+    q("CREATE TABLE test.partition_complex (p Date, k Int8, v1 Int8 MATERIALIZED k + 1) "
       "ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS index_granularity=1, index_granularity_bytes=0")
-    q("INSERT INTO test.partition (p, k) VALUES(toDate(31), 1)")
-    q("INSERT INTO test.partition (p, k) VALUES(toDate(1), 2)")
+    q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(31), 1)")
+    q("INSERT INTO test.partition_complex (p, k) VALUES(toDate(1), 2)")
 
     yield
 
-    q("DROP TABLE test.partition")
+    q("DROP TABLE test.partition_complex")
 
 
 def test_partition_complex(partition_table_complex):
     partition_complex_assert_columns_txt()
 
-    q("ALTER TABLE test.partition FREEZE")
+    q("ALTER TABLE test.partition_complex FREEZE")
 
     partition_complex_assert_checksums()
 
-    q("ALTER TABLE test.partition DETACH PARTITION 197001")
-    q("ALTER TABLE test.partition ATTACH PARTITION 197001")
+    q("ALTER TABLE test.partition_complex DETACH PARTITION 197001")
+    q("ALTER TABLE test.partition_complex ATTACH PARTITION 197001")
 
     partition_complex_assert_columns_txt()
 
-    q("ALTER TABLE test.partition MODIFY COLUMN v1 Int8")
+    q("ALTER TABLE test.partition_complex MODIFY COLUMN v1 Int8")
 
     # Check the backup hasn't changed
     partition_complex_assert_checksums()
 
-    q("OPTIMIZE TABLE test.partition")
+    q("OPTIMIZE TABLE test.partition_complex")
 
     expected = TSV('31\t1\t2\n'
                    '1\t2\t3')
-    res = q("SELECT toUInt16(p), k, v1 FROM test.partition ORDER BY k")
+    res = q("SELECT toUInt16(p), k, v1 FROM test.partition_complex ORDER BY k")
     assert (TSV(res) == expected)
@@ -166,9 +163,9 @@ def test_attach_check_all_parts(attach_check_all_parts_table):
     q("ALTER TABLE test.attach_partition DETACH PARTITION 0")
 
     path_to_detached = path_to_data + 'data/test/attach_partition/detached/'
-    subprocess_check_call('mkdir {}'.format(path_to_detached + '0_5_5_0'))
-    subprocess_check_call('cp -pr {} {}'.format(path_to_detached + '0_1_1_0', path_to_detached + 'attaching_0_6_6_0'))
-    subprocess_check_call('cp -pr {} {}'.format(path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0'))
+    instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + '0_5_5_0')])
+    instance.exec_in_container(['cp', '-pr', path_to_detached + '0_1_1_0', path_to_detached + 'attaching_0_6_6_0'])
+    instance.exec_in_container(['cp', '-pr', path_to_detached + '0_3_3_0', path_to_detached + 'deleting_0_7_7_0'])
 
     error = instance.client.query_and_get_error("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
     assert 0 <= error.find('No columns in part 0_5_5_0') or 0 <= error.find('No columns.txt in part 0_5_5_0')
@@ -179,7 +176,7 @@ def test_attach_check_all_parts(attach_check_all_parts_table):
                  "WHERE table='attach_partition' AND database='test' ORDER BY name")
     assert TSV(detached) == TSV('0_1_1_0\n0_3_3_0\n0_5_5_0\nattaching_0_6_6_0\ndeleting_0_7_7_0')
 
-    subprocess_check_call('rm -r {}'.format(path_to_detached + '0_5_5_0'))
+    instance.exec_in_container(['rm', '-r', path_to_detached + '0_5_5_0'])
 
     q("ALTER TABLE test.attach_partition ATTACH PARTITION 0")
     parts = q("SElECT name FROM system.parts WHERE table='attach_partition' AND database='test' ORDER BY name")
@@ -212,10 +209,10 @@ def test_drop_detached_parts(drop_detached_parts_table):
     q("ALTER TABLE test.drop_detached DETACH PARTITION 1")
 
     path_to_detached = path_to_data + 'data/test/drop_detached/detached/'
-    subprocess_check_call('mkdir {}'.format(path_to_detached + 'attaching_0_6_6_0'))
-    subprocess_check_call('mkdir {}'.format(path_to_detached + 'deleting_0_7_7_0'))
-    subprocess_check_call('mkdir {}'.format(path_to_detached + 'any_other_name'))
-    subprocess_check_call('mkdir {}'.format(path_to_detached + 'prefix_1_2_2_0_0'))
+    instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'attaching_0_6_6_0')])
+    instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'deleting_0_7_7_0')])
+    instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'any_other_name')])
+    instance.exec_in_container(['mkdir', '{}'.format(path_to_detached + 'prefix_1_2_2_0_0')])
 
     error = instance.client.query_and_get_error("ALTER TABLE test.drop_detached DROP DETACHED PART '../1_2_2_0'",
                                                 settings=s)

From 7be062d1b4fa472436bb8bf53ae29c58fc57fc98 Mon Sep 17 00:00:00 2001
From: Yatsishin Ilya <2159081+qoega@users.noreply.github.com>
Date: Fri, 11 Jun 2021 13:50:44 +0300
Subject: [PATCH 2/2] typo

---
 tests/integration/test_redirect_url_storage/test.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/integration/test_redirect_url_storage/test.py b/tests/integration/test_redirect_url_storage/test.py
index 8c555ef5aaf..c99e5182c91 100644
--- a/tests/integration/test_redirect_url_storage/test.py
+++ b/tests/integration/test_redirect_url_storage/test.py
@@ -16,7 +16,7 @@ def started_cluster():
 
 
 def test_url_without_redirect(started_cluster):
-    hdfs_api = start_cluster.hdfs_api
+    hdfs_api = started_cluster.hdfs_api
 
     hdfs_api.write_data("/simple_storage", "1\tMark\t72.53\n")
     assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"
@@ -28,7 +28,7 @@ def test_url_with_globs(started_cluster):
-    hdfs_api = start_cluster.hdfs_api
+    hdfs_api = started_cluster.hdfs_api
 
     hdfs_api.write_data("/simple_storage_1_1", "1\n")
     hdfs_api.write_data("/simple_storage_1_2", "2\n")
@@ -43,7 +43,7 @@ def test_url_with_globs(started_cluster):
 
 
 def test_url_with_globs_and_failover(started_cluster):
-    hdfs_api = start_cluster.hdfs_api
+    hdfs_api = started_cluster.hdfs_api
 
     hdfs_api.write_data("/simple_storage_1_1", "1\n")
     hdfs_api.write_data("/simple_storage_1_2", "2\n")
@@ -58,7 +58,7 @@ def test_url_with_redirect_not_allowed(started_cluster):
-    hdfs_api = start_cluster.hdfs_api
+    hdfs_api = started_cluster.hdfs_api
 
     hdfs_api.write_data("/simple_storage", "1\tMark\t72.53\n")
     assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"
@@ -71,7 +71,7 @@ def test_url_with_redirect_allowed(started_cluster):
-    hdfs_api = start_cluster.hdfs_api
+    hdfs_api = started_cluster.hdfs_api
 
     hdfs_api.write_data("/simple_storage", "1\tMark\t72.53\n")
     assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"