Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-09-20 00:30:49 +00:00
fix flaky tests

commit c6c0a44b93
parent 27cdbb54d7
@@ -1272,7 +1272,7 @@ def test_filtering_by_file_or_path(cluster):
     node.query("SYSTEM FLUSH LOGS")
 
     result = node.query(
-        f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish'"
+        f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish' ORDER BY event_time_microseconds DESC LIMIT 1"
     )
 
     assert int(result) == 1
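Why this deflakes the azure check: system.query_log keeps one QueryFinish row per matching query, so once the test (or a retry) has executed the SELECT more than once, the unqualified lookup returns several rows and int(result) fails to parse. Ordering by event_time_microseconds and taking LIMIT 1 pins the assertion to the most recent run. A minimal sketch of the pattern, assuming a node object with query() as in these tests (the helper name is illustrative, not part of the change):

# Read a ProfileEvents counter for the most recent matching query only.
def latest_profile_event(node, event, query_pattern):
    node.query("SYSTEM FLUSH LOGS")  # persist in-memory query_log rows first
    return int(
        node.query(
            f"SELECT ProfileEvents['{event}'] FROM system.query_log"
            f" WHERE query ILIKE '{query_pattern}' AND type = 'QueryFinish'"
            " ORDER BY event_time_microseconds DESC LIMIT 1"  # newest row wins
        )
    )

assert latest_profile_event(node, "EngineFileLikeReadFiles", "%select%azure%test_filter%") == 1

The remaining hunks all touch the HDFS storage tests.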
@@ -41,7 +41,6 @@ def test_read_write_storage(started_cluster):
     node1.query("insert into SimpleHDFSStorage values (1, 'Mark', 72.53)")
     assert hdfs_api.read_data("/simple_storage") == "1\tMark\t72.53\n"
     assert node1.query("select * from SimpleHDFSStorage") == "1\tMark\t72.53\n"
-    node1.query("drop table if exists SimpleHDFSStorage")
 
 
 def test_read_write_storage_with_globs(started_cluster):
@@ -95,11 +94,6 @@ def test_read_write_storage_with_globs(started_cluster):
         print(ex)
         assert "in readonly mode" in str(ex)
 
-    node1.query("DROP TABLE HDFSStorageWithRange")
-    node1.query("DROP TABLE HDFSStorageWithEnum")
-    node1.query("DROP TABLE HDFSStorageWithQuestionMark")
-    node1.query("DROP TABLE HDFSStorageWithAsterisk")
-
 
 def test_storage_with_multidirectory_glob(started_cluster):
     hdfs_api = started_cluster.hdfs_api
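The hunks in this file fall into two groups. This one and the ones like it delete the per-test trailing DROP TABLE cleanup: a failing assertion skipped those drops anyway, leaving state behind for the next run. If centralized teardown is wanted instead, a pytest fixture is the usual home for it; a hypothetical sketch, not part of this commit (node1 is the module-level instance these tests already use):

import pytest

@pytest.fixture
def hdfs_tables():
    # Hypothetical: tests append the table names they create; the drop runs
    # even when an assertion in the test body fails.
    created = []
    yield created
    for table in created:
        node1.query(f"DROP TABLE IF EXISTS {table}")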
@@ -341,7 +335,6 @@ def test_virtual_columns(started_cluster):
         )
         == expected
     )
-    node1.query("DROP TABLE virtual_cols")
 
 
 def test_read_files_with_spaces(started_cluster):
@@ -363,7 +356,6 @@ def test_read_files_with_spaces(started_cluster):
     )
     assert node1.query("select * from test order by id") == "1\n2\n3\n"
     fs.delete(dir, recursive=True)
-    node1.query("DROP TABLE test")
 
 
 def test_truncate_table(started_cluster):
@@ -435,7 +427,7 @@ def test_seekable_formats(started_cluster):
         f"hdfs('hdfs://hdfs1:9000/parquet', 'Parquet', 'a Int32, b String')"
     )
     node1.query(
-        f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)"
     )
 
     result = node1.query(f"SELECT count() FROM {table_function}")
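The second group of hunks, starting here, strips SETTINGS hdfs_truncate_on_insert=1 from the inserts. The setting matters because by default ClickHouse refuses to insert into a file-like path that already exists rather than silently overwriting it; the tests only needed it when leftover files from a previous run could still be present. A sketch of the two behaviors, assuming node1 as above (hypothetical path, error wording approximate):

# Second insert into the same HDFS path: the default is an error, the
# setting opts in to overwriting.
uri = "hdfs('hdfs://hdfs1:9000/truncate_demo.tsv', 'TSV', 'a Int32')"
node1.query(f"insert into table function {uri} select 1")
err = node1.query_and_get_error(f"insert into table function {uri} select 2")
assert "hdfs_truncate_on_insert" in err  # "file already exists" style error
node1.query(
    f"insert into table function {uri} select 2 SETTINGS hdfs_truncate_on_insert=1"
)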
@@ -443,7 +435,7 @@ def test_seekable_formats(started_cluster):
 
     table_function = f"hdfs('hdfs://hdfs1:9000/orc', 'ORC', 'a Int32, b String')"
     node1.query(
-        f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000)"
     )
     result = node1.query(f"SELECT count() FROM {table_function}")
     assert int(result) == 5000000
@@ -467,7 +459,7 @@ def test_read_table_with_default(started_cluster):
 
 def test_schema_inference(started_cluster):
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000) SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/native', 'Native', 'a Int32, b String') SELECT number, randomString(100) FROM numbers(5000000)"
     )
 
     result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/native', 'Native')")
@@ -520,7 +512,6 @@ def test_hdfs_directory_not_exist(started_cluster):
     assert "" == node1.query(
         "select * from HDFSStorageWithNotExistDir settings hdfs_ignore_file_doesnt_exist=1"
     )
-    node1.query("DROP TABLE HDFSStorageWithNotExistDir")
 
 
 def test_overwrite(started_cluster):
@@ -540,7 +531,6 @@ def test_overwrite(started_cluster):
 
     result = node1.query(f"select count() from test_overwrite")
     assert int(result) == 10
-    node1.query(f"DROP TABLE test_overwrite")
 
 
 def test_multiple_inserts(started_cluster):
@@ -577,7 +567,6 @@ def test_multiple_inserts(started_cluster):
 
     result = node1.query(f"select count() from test_multiple_inserts")
     assert int(result) == 60
-    node1.query(f"DROP TABLE test_multiple_inserts")
 
 
 def test_format_detection(started_cluster):
@@ -591,10 +580,10 @@ def test_format_detection(started_cluster):
 
 def test_schema_inference_with_globs(started_cluster):
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/data1.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL"
     )
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0 SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/data2.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select 0"
     )
 
     result = node1.query(
@@ -608,7 +597,7 @@ def test_schema_inference_with_globs(started_cluster):
     assert sorted(result.split()) == ["0", "\\N"]
 
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/data3.jsoncompacteachrow', 'JSONCompactEachRow', 'x Nullable(UInt32)') select NULL"
     )
 
     filename = "data{1,3}.jsoncompacteachrow"
@@ -620,7 +609,7 @@ def test_schema_inference_with_globs(started_cluster):
     assert "All attempts to extract table structure from files failed" in result
 
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]' SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/data0.jsoncompacteachrow', 'TSV', 'x String') select '[123;]'"
     )
 
     result = node1.query_and_get_error(
@@ -632,7 +621,7 @@ def test_schema_inference_with_globs(started_cluster):
 
 def test_insert_select_schema_inference(started_cluster):
     node1.query(
-        f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into table function hdfs('hdfs://hdfs1:9000/test.native.zst') select toUInt64(1) as x"
    )
 
     result = node1.query(f"desc hdfs('hdfs://hdfs1:9000/test.native.zst')")
@@ -675,9 +664,7 @@ def test_virtual_columns_2(started_cluster):
     table_function = (
         f"hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')"
     )
-    node1.query(
-        f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1"
-    )
+    node1.query(f"insert into table function {table_function} SELECT 1, 'kek'")
 
     result = node1.query(f"SELECT _path FROM {table_function}")
     assert result.strip() == "parquet_2"
@@ -685,9 +672,7 @@ def test_virtual_columns_2(started_cluster):
     table_function = (
         f"hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')"
     )
-    node1.query(
-        f"insert into table function {table_function} SELECT 1, 'kek' SETTINGS hdfs_truncate_on_insert=1"
-    )
+    node1.query(f"insert into table function {table_function} SELECT 1, 'kek'")
 
     result = node1.query(f"SELECT _path FROM {table_function}")
     assert result.strip() == "kek"
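Aside from the settings cleanup, these two hunks are a compact illustration of virtual-column shadowing: for parquet_2, _path is the virtual column and resolves to the file path ("parquet_2"), while parquet_3 declares a physical column literally named _path, which shadows the virtual one, so the stored value ("kek") comes back instead. Condensed, assuming node1 as above:

# Virtual _path vs. a physical column named _path.
tf2 = "hdfs('hdfs://hdfs1:9000/parquet_2', 'Parquet', 'a Int32, b String')"
tf3 = "hdfs('hdfs://hdfs1:9000/parquet_3', 'Parquet', 'a Int32, _path String')"
assert node1.query(f"SELECT _path FROM {tf2}").strip() == "parquet_2"  # virtual
assert node1.query(f"SELECT _path FROM {tf3}").strip() == "kek"        # physical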
@@ -984,11 +969,11 @@ def test_read_subcolumns(started_cluster):
     node = started_cluster.instances["node1"]
 
     node.query(
-        f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.tsv', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)"
     )
 
     node.query(
-        f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into function hdfs('hdfs://hdfs1:9000/test_subcolumns.jsonl', auto, 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)"
     )
 
     res = node.query(
@@ -1034,11 +1019,11 @@ def test_union_schema_inference_mode(started_cluster):
     node = started_cluster.instances["node1"]
 
     node.query(
-        "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a SETTINGS hdfs_truncate_on_insert=1"
+        "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference1.jsonl') select 1 as a"
     )
 
     node.query(
-        "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b SETTINGS hdfs_truncate_on_insert=1"
+        "insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference2.jsonl') select 2 as b"
     )
 
     node.query("system drop schema cache for hdfs")
@@ -1070,7 +1055,7 @@ def test_union_schema_inference_mode(started_cluster):
     )
     assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n"
     node.query(
-        f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error' SETTINGS hdfs_truncate_on_insert=1"
+        f"insert into function hdfs('hdfs://hdfs1:9000/test_union_schema_inference3.jsonl', TSV) select 'Error'"
     )
 
     error = node.query_and_get_error(
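For context on what the union-mode test asserts: with schema_inference_mode = 'union', describing several files merges their per-file schemas instead of requiring them to be identical, so one file carrying only a and another carrying only b describe as the combined pair of Nullable columns. A sketch, assuming node as above and hypothetical file names:

# Union schema inference over two JSONL files with disjoint columns.
node.query("insert into function hdfs('hdfs://hdfs1:9000/u1.jsonl') select 1 as a")
node.query("insert into function hdfs('hdfs://hdfs1:9000/u2.jsonl') select 2 as b")
result = node.query(
    "desc hdfs('hdfs://hdfs1:9000/u{1,2}.jsonl') "
    "SETTINGS schema_inference_mode = 'union'"
)
assert result == "a\tNullable(Int64)\n" "b\tNullable(Int64)\n"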
@@ -1083,11 +1068,11 @@ def test_format_detection(started_cluster):
     node = started_cluster.instances["node1"]
 
     node.query(
-        "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0) SETTINGS hdfs_truncate_on_insert=1"
+        "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection0', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(0)"
     )
 
     node.query(
-        "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10) SETTINGS hdfs_truncate_on_insert=1"
+        "insert into function hdfs('hdfs://hdfs1:9000/test_format_detection1', JSONEachRow) select number as x, 'str_' || toString(number) as y from numbers(10)"
     )
 
     expected_desc_result = node.query(
@@ -1151,7 +1136,7 @@ def test_write_to_globbed_partitioned_path(started_cluster):
     node = started_cluster.instances["node1"]
 
     error = node.query_and_get_error(
-        "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42 SETTINGS hdfs_truncate_on_insert=1"
+        "insert into function hdfs('hdfs://hdfs1:9000/test_data_*_{_partition_id}.csv') partition by 42 select 42"
     )
 
     assert "DATABASE_ACCESS_DENIED" in error
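The last hunk is the same settings cleanup; the behavior under test is unchanged: {_partition_id} in the destination path is how partitioned writes name their output files, and a glob character such as * makes the destination ambiguous, so the insert is rejected with DATABASE_ACCESS_DENIED. A hypothetical working counterpart without the glob, not part of this commit:

# {_partition_id} expands per partition key value, yielding
# part_0.csv, part_1.csv, part_2.csv on HDFS.
node.query(
    "insert into function hdfs('hdfs://hdfs1:9000/part_{_partition_id}.csv', 'CSV', 'n UInt64') "
    "partition by n % 3 select number as n from numbers(9)"
)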