Another attempt to enable pytest (#22664)

* Fix some tests

* Fix tests
Author: Ivan, 2021-04-14 19:35:17 +03:00 (committed by GitHub)
parent 496c2e45d0
commit 57f61c954c
15 changed files with 72 additions and 16 deletions
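
Note: the pattern this commit applies across the stateless tests is to drop whatever each test creates, presumably so the same test can be re-run against a reused database (as a pytest-driven runner does) without "table already exists" failures. A minimal sketch of the pattern, with a hypothetical table name:

-- hypothetical test file; only the final DROP is the pattern being added in the hunks below
DROP TABLE IF EXISTS t_example;
CREATE TABLE t_example (x UInt8) ENGINE = Memory;
INSERT INTO t_example VALUES (1);
SELECT count() FROM t_example;
DROP TABLE t_example;   -- cleanup so a re-run starts from a clean state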


@@ -87,3 +87,5 @@ select 5 = windowFunnel(10000)(timestamp, event = 1000, event = 1001, event = 10
select 2 = windowFunnel(10000, 'strict_increase')(timestamp, event = 1000, event = 1001, event = 1002, event = 1003, event = 1004) from funnel_test_strict_increase;
select 3 = windowFunnel(10000)(timestamp, event = 1004, event = 1004, event = 1004) from funnel_test_strict_increase;
select 1 = windowFunnel(10000, 'strict_increase')(timestamp, event = 1004, event = 1004, event = 1004) from funnel_test_strict_increase;
drop table funnel_test_strict_increase;
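
Note: for context on what this hunk asserts — windowFunnel(window)(timestamp, cond1, cond2, ...) returns how far the condition chain is matched, in order, within the time window, and the 'strict_increase' variant additionally requires strictly growing timestamps between matched events. A sketch with a hypothetical fixture (the real table is created earlier in the test, outside this hunk), assuming the three matching rows share one timestamp, which is exactly what 'strict_increase' is meant to reject:

CREATE TABLE funnel_sketch (timestamp UInt32, event UInt32) ENGINE = Memory;
INSERT INTO funnel_sketch VALUES (100, 1004), (100, 1004), (100, 1004);
-- expected 3 per the assertion above: the default mode accepts equal timestamps
SELECT windowFunnel(10000)(timestamp, event = 1004, event = 1004, event = 1004) FROM funnel_sketch;
-- expected 1 per the assertion above: the chain stops without strictly increasing timestamps
SELECT windowFunnel(10000, 'strict_increase')(timestamp, event = 1004, event = 1004, event = 1004) FROM funnel_sketch;
DROP TABLE funnel_sketch;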


@@ -26,3 +26,5 @@ INSERT INTO geo VALUES ('MULTIPOLYGON(((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4
INSERT INTO geo VALUES ('MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 2);
INSERT INTO geo VALUES ('MULTIPOLYGON(((2 0,10 0,10 10,0 10,2 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 3);
SELECT readWktMultiPolygon(s) FROM geo ORDER BY id;
DROP TABLE geo;


@@ -46,3 +46,5 @@ SELECT svg(p) FROM geo ORDER BY id;
SELECT svg(p, 'b') FROM geo ORDER BY id;
SELECT svg([[[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], s) FROM geo ORDER BY id;
SELECT svg(p, s) FROM geo ORDER BY id;
DROP TABLE geo;


@@ -30,3 +30,5 @@ INSERT INTO geo VALUES ([[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4),
INSERT INTO geo VALUES ([[[(1, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], [[(-10, -10), (-10, -9), (-9, 10)]]], 2);
INSERT INTO geo VALUES ([[[(2, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], [[(-10, -10), (-10, -9), (-9, 10)]]], 3);
SELECT wkt(p) FROM geo ORDER BY id;
DROP TABLE geo;
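
Note: the three geo hunks above exercise readWktMultiPolygon (parse WKT text into a MultiPolygon value), svg (render a geometry as an SVG element, with an optional second style argument), and wkt (serialize back to WKT text). A quick round trip that needs no table, assuming constant arguments are accepted as in the tests above:

-- should print an equivalent multipolygon back, confirming parse and serialize are inverses here
SELECT wkt(readWktMultiPolygon('MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0)))'));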


@@ -6,3 +6,5 @@ drop table if exists polygon_01302;
create table polygon_01302 (x Array(Array(Array(Tuple(Float64, Float64)))), y Array(Array(Array(Tuple(Float64, Float64))))) engine=Memory();
insert into polygon_01302 values ([[[(23.725750, 37.971536)]]], [[[(4.3826169, 50.8119483)]]]);
select polygonsDistanceSpherical(x, y) from polygon_01302;
drop table polygon_01302;


@@ -1108,3 +1108,4 @@ from (
-- -INT_MIN row offset that can lead to problems with negation, found when fuzzing
-- under UBSan. Should be limited to at most INT_MAX.
select count() over (rows between 2147483648 preceding and 2147493648 following) from numbers(2); -- { serverError 36 }
drop table window_mt;
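
Note: the rejected frame above is a bounds check — 2147483648 is INT_MAX + 1, so the offset cannot be represented as a positive 32-bit int and the query fails with serverError 36 (BAD_ARGUMENTS) instead of wrapping around; the next hunk repeats the same assertion in a second test file. For contrast, a sketch of a frame that stays within bounds (window functions were still experimental in this release, so the setting below may be required):

SET allow_experimental_window_functions = 1;  -- assumption: needed on builds of this era
SELECT number,
       count() OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS cnt
FROM numbers(3);
-- each row sees itself plus its neighbours, so cnt is 2, 3, 2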


@@ -414,3 +414,5 @@ from (
-- -INT_MIN row offset that can lead to problems with negation, found when fuzzing
-- under UBSan. Should be limited to at most INT_MAX.
select count() over (rows between 2147483648 preceding and 2147493648 following) from numbers(2); -- { serverError 36 }
drop table window_mt;


@@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# Data preparation.
# Now we can get the user_files_path by use the table file function for trick. also we can get it by query as:
# "insert into function file('exist.txt', 'CSV', 'val1 char') values ('aaaa'); select _path from file('exist.txt', 'CSV', 'val1 char')"
user_files_path=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}')
mkdir -p ${user_files_path}/
echo -n aaaaaaaaa > ${user_files_path}/a.txt


@@ -1,9 +1,11 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
${CLICKHOUSE_CLIENT} -q "create table insert_big_json(a String, b String) engine=MergeTree() order by tuple()";
python3 -c "[print('{{\"a\":\"{}\", \"b\":\"{}\"'.format('clickhouse'* 1000000, 'dbms' * 1000000)) for i in range(10)]; [print('{{\"a\":\"{}\", \"b\":\"{}\"}}'.format('clickhouse'* 100000, 'dbms' * 100000)) for i in range(10)]" 2>/dev/null | ${CLICKHOUSE_CLIENT} --input_format_parallel_parsing=1 --max_memory_usage=0 -q "insert into insert_big_json FORMAT JSONEachRow" 2>&1 | grep -q "min_chunk_bytes_for_parallel_parsing" && echo "Ok." || echo "FAIL" ||:
${CLICKHOUSE_CLIENT} -q "drop table insert_big_json"


@@ -22,4 +22,6 @@ ${CLICKHOUSE_CLIENT} -q "SELECT name, polygonPerimeterSpherical(p) from country_
${CLICKHOUSE_CLIENT} -q "SELECT '-------------------------------------'"
${CLICKHOUSE_CLIENT} -q "SELECT name, polygonAreaSpherical(p) from country_rings"
${CLICKHOUSE_CLIENT} -q "SELECT '-------------------------------------'"
${CLICKHOUSE_CLIENT} -q "drop table if exists country_rings;"
${CLICKHOUSE_CLIENT} -q "drop table country_polygons"


@@ -1,5 +1,5 @@
drop table if exists test_enum;
create table test_enum (c Nullable(Enum16('A' = 1, 'B' = 2))) engine Log;
insert into test_enum values (1), (NULL);
select * from test_enum;
drop table test_enum;
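
Note: what the enum hunk checks, spelled out — inserting the numeric value 1 into a Nullable(Enum16('A' = 1, 'B' = 2)) column comes back as the name 'A', and NULL stays NULL. A self-contained sketch with a hypothetical table name:

CREATE TABLE enum_sketch (c Nullable(Enum16('A' = 1, 'B' = 2))) ENGINE = Log;
INSERT INTO enum_sketch VALUES (1), (NULL);
SELECT * FROM enum_sketch;   -- 'A', then NULL
DROP TABLE enum_sketch;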


@@ -10,3 +10,5 @@ $CLICKHOUSE_CLIENT --optimize_skip_unused_shards=1 -nm -q "
create table dist_01758 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy);
select * from dist_01758 where dummy = 0 format Null;
" |& grep -o "StorageDistributed (dist_01758).*"
$CLICKHOUSE_CLIENT -q "drop table dist_01758" 2>/dev/null


@@ -1,2 +1,3 @@
create table dist_01756 (dummy UInt8) ENGINE = Distributed('test_cluster_two_shards', 'system', 'one', dummy);
select ignore(1), * from dist_01756 where 0 settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1;
drop table dist_01756;


@@ -65,3 +65,5 @@ SELECT tuple(inf, inf) as key, dictGet('01760_db.dict_array', 'name', key); --{s
DROP DICTIONARY 01760_db.dict_array;
DROP TABLE 01760_db.points;
DROP TABLE 01760_db.polygons;
DROP DATABASE 01760_db;


@@ -1,5 +1,3 @@
import difflib
import os
import random
@@ -7,6 +5,8 @@ import string
import subprocess
import sys
import pytest
SKIP_LIST = [
    # these couple of tests hangs everything
@@ -14,44 +14,63 @@ SKIP_LIST = [
    "00987_distributed_stack_overflow",
    # just fail
    "00133_long_shard_memory_tracker_and_exception_safety",
    "00505_secure",
    "00505_shard_secure",
    "00646_url_engine",
    "00725_memory_tracking", # BROKEN
    "00738_lock_for_inner_table",
    "00821_distributed_storage_with_join_on",
    "00825_protobuf_format_array_3dim",
    "00825_protobuf_format_array_of_arrays",
    "00825_protobuf_format_enum_mapping",
    "00825_protobuf_format_nested_in_nested",
    "00825_protobuf_format_nested_optional",
    "00825_protobuf_format_no_length_delimiter",
    "00825_protobuf_format_persons",
    "00825_protobuf_format_squares",
    "00825_protobuf_format_table_default",
    "00834_cancel_http_readonly_queries_on_client_close",
    "00877_memory_limit_for_new_delete",
    "00900_parquet_load",
    "00933_test_fix_extra_seek_on_compressed_cache",
    "00965_logs_level_bugfix",
    "00965_send_logs_level_concurrent_queries",
    "00974_query_profiler",
    "00990_hasToken",
    "00990_metric_log_table_not_empty",
    "01014_lazy_database_concurrent_recreate_reattach_and_show_tables",
    "01017_uniqCombined_memory_usage",
    "01018_Distributed__shard_num",
    "01018_ip_dictionary_long",
    "01035_lc_empty_part_bug", # FLAKY
    "01050_clickhouse_dict_source_with_subquery",
    "01053_ssd_dictionary",
    "01054_cache_dictionary_overflow_cell",
    "01057_http_compression_prefer_brotli",
    "01080_check_for_error_incorrect_size_of_nested_column",
    "01083_expressions_in_engine_arguments",
    "01086_odbc_roundtrip",
    "01088_benchmark_query_id",
    "01092_memory_profiler",
    "01098_temporary_and_external_tables",
    "01099_parallel_distributed_insert_select",
    "01103_check_cpu_instructions_at_startup",
    "01114_database_atomic",
    "01148_zookeeper_path_macros_unfolding",
    "01175_distributed_ddl_output_mode_long",
    "01181_db_atomic_drop_on_cluster", # tcp port in reference
    "01280_ssd_complex_key_dictionary",
    "01293_client_interactive_vertical_multiline", # expect-test
    "01293_client_interactive_vertical_singleline", # expect-test
    "01293_show_clusters",
    "01293_show_settings",
    "01293_system_distribution_queue", # FLAKY
    "01294_lazy_database_concurrent_recreate_reattach_and_show_tables_long",
    "01294_system_distributed_on_cluster",
    "01300_client_save_history_when_terminated", # expect-test
    "01304_direct_io",
    "01306_benchmark_json",
    "01320_create_sync_race_condition_zookeeper",
    "01355_CSV_input_format_allow_errors",
    "01370_client_autocomplete_word_break_characters", # expect-test
@@ -66,18 +85,33 @@ SKIP_LIST = [
    "01514_distributed_cancel_query_on_error",
    "01520_client_print_query_id", # expect-test
    "01526_client_start_and_exit", # expect-test
    "01526_max_untracked_memory",
    "01527_dist_sharding_key_dictGet_reload",
    "01528_play",
    "01545_url_file_format_settings",
    "01553_datetime64_comparison",
    "01555_system_distribution_queue_mask",
    "01558_ttest_scipy",
    "01561_mann_whitney_scipy",
    "01582_distinct_optimization",
    "01591_window_functions",
    "01599_multiline_input_and_singleline_comments", # expect-test
    "01601_custom_tld",
    "01606_git_import",
    "01610_client_spawn_editor", # expect-test
    "01658_read_file_to_stringcolumn",
    "01666_merge_tree_max_query_limit",
    "01674_unicode_asan",
    "01676_clickhouse_client_autocomplete", # expect-test (partially)
    "01683_text_log_deadlock", # secure tcp
    "01684_ssd_cache_dictionary_simple_key",
    "01685_ssd_cache_dictionary_complex_key",
    "01746_executable_pool_dictionary",
    "01747_executable_pool_dictionary_implicit_key",
    "01747_join_view_filter_dictionary",
    "01748_dictionary_table_dot",
    "01754_cluster_all_replicas_shard_num",
    "01759_optimize_skip_unused_shards_zero_shards",
]