diff --git a/docker/test/stateful/run.sh b/docker/test/stateful/run.sh
index 35ffeee5438..857385f4715 100755
--- a/docker/test/stateful/run.sh
+++ b/docker/test/stateful/run.sh
@@ -4,6 +4,9 @@
source /setup_export_logs.sh
set -e -x
+MAX_RUN_TIME=${MAX_RUN_TIME:-3600}
+MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 3600 : MAX_RUN_TIME))
+
# Choose random timezone for this test run
TZ="$(rg -v '#' /usr/share/zoneinfo/zone.tab | awk '{print $3}' | shuf | head -n1)"
echo "Choosen random timezone $TZ"
@@ -242,7 +245,22 @@ function run_tests()
}
export -f run_tests
-timeout "$MAX_RUN_TIME" bash -c run_tests ||:
+
+function timeout_with_logging() {
+ local exit_code=0
+
+ timeout -s TERM --preserve-status "${@}" || exit_code="${?}"
+
+ if [[ "${exit_code}" -eq "143" ]]
+ then
+ echo "The command 'timeout ${*}' has been killed by timeout"
+ fi
+
+ return $exit_code
+}
+
+TIMEOUT=$((MAX_RUN_TIME - 700))
+timeout_with_logging "$TIMEOUT" bash -c run_tests ||:
echo "Files in current directory"
ls -la ./
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index 2b535f8dd23..0647ed02839 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -12,12 +12,6 @@ MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 7200 : MAX_RUN_TIME))
USE_DATABASE_REPLICATED=${USE_DATABASE_REPLICATED:=0}
USE_SHARED_CATALOG=${USE_SHARED_CATALOG:=0}
-RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
-
-if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]] || [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
- RUN_SEQUENTIAL_TESTS_IN_PARALLEL=0
-fi
-
# Choose random timezone for this test run.
#
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
@@ -101,53 +95,6 @@ if [ "$NUM_TRIES" -gt "1" ]; then
mkdir -p /var/run/clickhouse-server
fi
-# Run a CH instance to execute sequential tests on it in parallel with all other tests.
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- mkdir -p /var/run/clickhouse-server3 /etc/clickhouse-server3 /var/lib/clickhouse3
- cp -r -L /etc/clickhouse-server/* /etc/clickhouse-server3/
-
- sudo chown clickhouse:clickhouse /var/run/clickhouse-server3 /var/lib/clickhouse3 /etc/clickhouse-server3/
- sudo chown -R clickhouse:clickhouse /etc/clickhouse-server3/*
-
- function replace(){
- sudo find /etc/clickhouse-server3/ -type f -name '*.xml' -exec sed -i "$1" {} \;
- }
-
- replace "s|9000|19000|g"
- replace "s|9440|19440|g"
- replace "s|9988|19988|g"
- replace "s|9234|19234|g"
- replace "s|9181|19181|g"
- replace "s|8443|18443|g"
- replace "s|9000|19000|g"
- replace "s|9181|19181|g"
- replace "s|9440|19440|g"
- replace "s|9010|19010|g"
- replace "s|9004|19004|g"
- replace "s|9005|19005|g"
- replace "s|9009|19009|g"
- replace "s|8123|18123|g"
- replace "s|/var/lib/clickhouse/|/var/lib/clickhouse3/|g"
- replace "s|/etc/clickhouse-server/|/etc/clickhouse-server3/|g"
- # distributed cache
- replace "s|10001|10003|g"
- replace "s|10002|10004|g"
-
- sudo -E -u clickhouse /usr/bin/clickhouse server --daemon --config /etc/clickhouse-server3/config.xml \
- --pid-file /var/run/clickhouse-server3/clickhouse-server.pid \
- -- --path /var/lib/clickhouse3/ --logger.stderr /var/log/clickhouse-server/stderr3.log \
- --logger.log /var/log/clickhouse-server/clickhouse-server3.log --logger.errorlog /var/log/clickhouse-server/clickhouse-server3.err.log \
- --tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
- --prometheus.port 19988 --keeper_server.raft_configuration.server.port 19234 --keeper_server.tcp_port 19181 \
- --mysql_port 19004 --postgresql_port 19005
-
- for _ in {1..100}
- do
- clickhouse-client --port 19000 --query "SELECT 1" && break
- sleep 1
- done
-fi
-
# simplest way to forward env variables to server
sudo -E -u clickhouse /usr/bin/clickhouse-server --config /etc/clickhouse-server/config.xml --daemon --pid-file /var/run/clickhouse-server/clickhouse-server.pid
@@ -183,9 +130,6 @@ if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
--prometheus.port 29988 \
--macros.shard s2 # It doesn't work :(
-
- MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
- MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
@@ -210,9 +154,6 @@ if [[ "$USE_SHARED_CATALOG" -eq 1 ]]; then
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
--prometheus.port 19988 \
--macros.replica r2 # It doesn't work :(
-
- MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)
- MAX_RUN_TIME=$((MAX_RUN_TIME != 0 ? MAX_RUN_TIME : 9000)) # set to 2.5 hours if 0 (unlimited)
fi
# Wait for the server to start, but not for too long.
@@ -223,7 +164,6 @@ do
done
setup_logs_replication
-
attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01
function fn_exists() {
@@ -284,11 +224,7 @@ function run_tests()
else
# All other configurations are OK.
ADDITIONAL_OPTIONS+=('--jobs')
- ADDITIONAL_OPTIONS+=('5')
- fi
-
- if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- ADDITIONAL_OPTIONS+=('--run-sequential-tests-in-parallel')
+ ADDITIONAL_OPTIONS+=('7')
fi
if [[ -n "$RUN_BY_HASH_NUM" ]] && [[ -n "$RUN_BY_HASH_TOTAL" ]]; then
@@ -373,9 +309,6 @@ done
# Because it's the simplest way to read it when server has crashed.
sudo clickhouse stop ||:
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- sudo clickhouse stop --pid-path /var/run/clickhouse-server3 ||:
-fi
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
sudo clickhouse stop --pid-path /var/run/clickhouse-server1 ||:
@@ -393,12 +326,6 @@ rg -Fa "" /var/log/clickhouse-server/clickhouse-server.log ||:
rg -A50 -Fa "============" /var/log/clickhouse-server/stderr.log ||:
zstd --threads=0 < /var/log/clickhouse-server/clickhouse-server.log > /test_output/clickhouse-server.log.zst &
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- rg -Fa "" /var/log/clickhouse-server3/clickhouse-server.log ||:
- rg -A50 -Fa "============" /var/log/clickhouse-server3/stderr.log ||:
- zstd --threads=0 < /var/log/clickhouse-server3/clickhouse-server.log > /test_output/clickhouse-server3.log.zst &
-fi
-
data_path_config="--path=/var/lib/clickhouse/"
if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
# We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
@@ -419,10 +346,6 @@ if [ $failed_to_save_logs -ne 0 ]; then
do
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
- if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- clickhouse-local --path /var/lib/clickhouse3/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.3.tsv.zst ||:
- fi
-
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
clickhouse-local --path /var/lib/clickhouse1/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.1.tsv.zst ||:
clickhouse-local --path /var/lib/clickhouse2/ --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.2.tsv.zst ||:
@@ -464,12 +387,6 @@ rm -rf /var/lib/clickhouse/data/system/*/
tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
-if [[ "$RUN_SEQUENTIAL_TESTS_IN_PARALLEL" -eq 1 ]]; then
- rm -rf /var/lib/clickhouse3/data/system/*/
- tar -chf /test_output/store.tar /var/lib/clickhouse3/store ||:
- tar -chf /test_output/metadata.tar /var/lib/clickhouse3/metadata/*.sql ||:
-fi
-
if [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
rg -Fa "" /var/log/clickhouse-server/clickhouse-server1.log ||:
diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py
index 74dd4d8fbd7..66db082677f 100644
--- a/tests/ci/functional_test_check.py
+++ b/tests/ci/functional_test_check.py
@@ -112,8 +112,8 @@ def get_run_command(
]
if flaky_check:
- envs.append("-e NUM_TRIES=100")
- envs.append("-e MAX_RUN_TIME=1800")
+ envs.append("-e NUM_TRIES=50")
+ envs.append("-e MAX_RUN_TIME=2800")
envs += [f"-e {e}" for e in additional_envs]
diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 60160e71e81..a8a6ce68b8f 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -2187,7 +2187,7 @@ def run_tests_array(all_tests_with_params: Tuple[List[str], int, TestSuite, bool
description = ""
test_case_name = removesuffix(test_case.name, ".gen", ".sql") + ": "
- if is_concurrent or args.run_sequential_tests_in_parallel:
+ if is_concurrent:
description = f"{test_case_name:72}"
else:
sys.stdout.flush()
@@ -2447,35 +2447,6 @@ def extract_key(key: str) -> str:
)[1]
-def override_envs(*args_, **kwargs):
- global args
- args.client += " --port 19000"
- args.http_port = 18123
- args.https_port = 18443
-
- updated_env = {
- "CLICKHOUSE_CONFIG": "/etc/clickhouse-server3/config.xml",
- "CLICKHOUSE_CONFIG_DIR": "/etc/clickhouse-server3",
- "CLICKHOUSE_CONFIG_GREP": "/etc/clickhouse-server3/preprocessed/config.xml",
- "CLICKHOUSE_USER_FILES": "/var/lib/clickhouse3/user_files",
- "CLICKHOUSE_SCHEMA_FILES": "/var/lib/clickhouse3/format_schemas",
- "CLICKHOUSE_PATH": "/var/lib/clickhouse3",
- "CLICKHOUSE_PORT_TCP": "19000",
- "CLICKHOUSE_PORT_TCP_SECURE": "19440",
- "CLICKHOUSE_PORT_TCP_WITH_PROXY": "19010",
- "CLICKHOUSE_PORT_HTTP": "18123",
- "CLICKHOUSE_PORT_HTTPS": "18443",
- "CLICKHOUSE_PORT_INTERSERVER": "19009",
- "CLICKHOUSE_PORT_KEEPER": "19181",
- "CLICKHOUSE_PORT_PROMTHEUS_PORT": "19988",
- "CLICKHOUSE_PORT_MYSQL": "19004",
- "CLICKHOUSE_PORT_POSTGRESQL": "19005",
- }
- os.environ.update(updated_env)
-
- run_tests_array(*args_, **kwargs)
-
-
def run_tests_process(*args, **kwargs):
return run_tests_array(*args, **kwargs)
@@ -2519,24 +2490,6 @@ def do_run_tests(jobs, test_suite: TestSuite):
processes.append(process)
process.start()
- if args.run_sequential_tests_in_parallel:
- # Run parallel tests and sequential tests at the same time
- # Sequential tests will use different ClickHouse instance
- # In this process we can safely override values in `args` and `os.environ`
- process = multiprocessing.Process(
- target=override_envs,
- args=(
- (
- test_suite.sequential_tests,
- len(test_suite.sequential_tests),
- test_suite,
- False,
- ),
- ),
- )
- processes.append(process)
- process.start()
-
while processes:
sys.stdout.flush()
# Periodically check the server for hangs
@@ -2568,15 +2521,15 @@ def do_run_tests(jobs, test_suite: TestSuite):
sleep(5)
- if not args.run_sequential_tests_in_parallel:
- run_tests_array(
- (
- test_suite.sequential_tests,
- len(test_suite.sequential_tests),
- test_suite,
- False,
- )
+ run_tests_array(
+ (
+ test_suite.sequential_tests,
+ len(test_suite.sequential_tests),
+ test_suite,
+ False,
)
+ )
+
return len(test_suite.sequential_tests) + len(test_suite.parallel_tests)
else:
num_tests = len(test_suite.all_tests)
@@ -3419,15 +3372,6 @@ def parse_args():
help="Path to file for fatal logs from client",
)
- parser.add_argument(
- "--run-sequential-tests-in-parallel",
- action="store_true",
- default=False,
- help="If `true`, tests with the tag `no-parallel` will run on a "
- "separate ClickHouse instance in parallel with other tests. "
- "This is used in CI to make test jobs run faster.",
- )
-
return parser.parse_args()
diff --git a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
index ae728c8d10d..f0bc52ee356 100755
--- a/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
+++ b/tests/queries/0_stateless/00840_long_concurrent_select_and_drop_deadlock.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: deadlock, no-debug
+# Tags: deadlock, no-debug, no-parallel
# NOTE: database = $CLICKHOUSE_DATABASE is unwanted
diff --git a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
index 79600c6f67e..16b62c37d80 100644
--- a/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
+++ b/tests/queries/0_stateless/00965_shard_unresolvable_addresses.sql
@@ -1,4 +1,4 @@
--- Tags: shard
+-- Tags: shard, no-parallel
SET prefer_localhost_replica = 1;
diff --git a/tests/queries/0_stateless/01004_rename_deadlock.sh b/tests/queries/0_stateless/01004_rename_deadlock.sh
index f0adf136e94..60d67e1744a 100755
--- a/tests/queries/0_stateless/01004_rename_deadlock.sh
+++ b/tests/queries/0_stateless/01004_rename_deadlock.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: deadlock
+# Tags: deadlock, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
index ef352606b69..51e8cdbc8e8 100755
--- a/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
+++ b/tests/queries/0_stateless/01005_rwr_shard_deadlock.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: deadlock, shard
+# Tags: deadlock, shard, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
index 9f4b2241732..d602ff6df3a 100755
--- a/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
+++ b/tests/queries/0_stateless/01007_r1r2_w_r2r1_deadlock.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: deadlock
+# Tags: deadlock, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
index 3046fcbcd73..e4b3a31b13f 100755
--- a/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
+++ b/tests/queries/0_stateless/01014_lazy_database_concurrent_recreate_reattach_and_show_tables.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: no-fasttest
+# Tags: no-fasttest, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
index 0d57bb25543..59a1e3cb980 100755
--- a/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
+++ b/tests/queries/0_stateless/01035_concurrent_move_partition_from_table_zookeeper.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: zookeeper, no-fasttest
+# Tags: zookeeper, no-fasttest, long, no-parallel
set -e
diff --git a/tests/queries/0_stateless/01053_ssd_dictionary.sh b/tests/queries/0_stateless/01053_ssd_dictionary.sh
index 00e5719a9a9..fdd40940ab5 100755
--- a/tests/queries/0_stateless/01053_ssd_dictionary.sh
+++ b/tests/queries/0_stateless/01053_ssd_dictionary.sh
@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Tags: no-parallel, no-fasttest
-# Tag no-fasttest: this test mistakenly requires acces to /var/lib/clickhouse -- can't run this locally, disabled
+# Tag no-fasttest: this test mistakenly requires access to /var/lib/clickhouse -- can't run this locally, disabled
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
@@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 -n --query="
DROP DATABASE IF EXISTS 01053_db;
- CREATE DATABASE 01053_db Engine = Ordinary;
+ CREATE DATABASE 01053_db;
DROP TABLE IF EXISTS 01053_db.table_for_dict;
diff --git a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh
index 399c9e488a4..382e3279711 100755
--- a/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh
+++ b/tests/queries/0_stateless/01079_parallel_alter_modify_zookeeper_long.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: long, zookeeper, no-fasttest
+# Tags: long, zookeeper, no-fasttest, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
index 90128d7a8ad..619b6e91d11 100755
--- a/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
+++ b/tests/queries/0_stateless/01111_create_drop_replicated_db_stress.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: race, zookeeper
+# Tags: race, zookeeper, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
index 9a80820dd58..60a65b9a253 100755
--- a/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
+++ b/tests/queries/0_stateless/01280_ssd_complex_key_dictionary.sh
@@ -6,9 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT -n --query="
- set allow_deprecated_database_ordinary=1;
DROP DATABASE IF EXISTS 01280_db;
- CREATE DATABASE 01280_db Engine = Ordinary;
+ CREATE DATABASE 01280_db;
DROP TABLE IF EXISTS 01280_db.table_for_dict;
CREATE TABLE 01280_db.table_for_dict
(
diff --git a/tests/queries/0_stateless/01412_cache_dictionary_race.sh b/tests/queries/0_stateless/01412_cache_dictionary_race.sh
index 9aa39652021..36295ca01ea 100755
--- a/tests/queries/0_stateless/01412_cache_dictionary_race.sh
+++ b/tests/queries/0_stateless/01412_cache_dictionary_race.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: race
+# Tags: race, no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
index 2bfd350ec51..fa9238041b1 100755
--- a/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
+++ b/tests/queries/0_stateless/01454_storagememory_data_race_challenge.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: race
+# Tags: race, no-parallel
set -e
diff --git a/tests/queries/0_stateless/01475_read_subcolumns.sql b/tests/queries/0_stateless/01475_read_subcolumns.sql
index d6eec2f84a1..d387230d9e5 100644
--- a/tests/queries/0_stateless/01475_read_subcolumns.sql
+++ b/tests/queries/0_stateless/01475_read_subcolumns.sql
@@ -1,4 +1,4 @@
--- Tags: no-object-storage, no-random-settings
+-- Tags: no-object-storage, no-random-settings, no-parallel
SET use_uncompressed_cache = 0;
diff --git a/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh b/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh
index b8efee89b4a..5a146968a3b 100755
--- a/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh
+++ b/tests/queries/0_stateless/01502_long_log_tinylog_deadlock_race.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: deadlock
+# Tags: deadlock, no-parallel
set -e
diff --git a/tests/queries/0_stateless/01533_multiple_nested.sql b/tests/queries/0_stateless/01533_multiple_nested.sql
index 80e9fc7e2fb..4756bd4e919 100644
--- a/tests/queries/0_stateless/01533_multiple_nested.sql
+++ b/tests/queries/0_stateless/01533_multiple_nested.sql
@@ -1,4 +1,4 @@
--- Tags: no-object-storage, no-random-merge-tree-settings
+-- Tags: no-object-storage, no-random-merge-tree-settings, no-parallel
-- no-s3 because read FileOpen metric
DROP TABLE IF EXISTS nested;
diff --git a/tests/queries/0_stateless/01778_mmap_cache_infra.sql b/tests/queries/0_stateless/01778_mmap_cache_infra.sql
index 29a84c5507b..50fdb6ffbba 100644
--- a/tests/queries/0_stateless/01778_mmap_cache_infra.sql
+++ b/tests/queries/0_stateless/01778_mmap_cache_infra.sql
@@ -1,3 +1,4 @@
+-- Tags: no-parallel
-- We check the existence of queries and metrics and don't check the results (a smoke test).
SYSTEM DROP MMAP CACHE;
diff --git a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
index edffc0a3807..0dfde4997a7 100755
--- a/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
+++ b/tests/queries/0_stateless/01921_concurrent_ttl_and_normal_merges_zookeeper_long.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-# Tags: long, zookeeper
+# Tags: long, zookeeper, no-parallel
CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL=error
diff --git a/tests/queries/0_stateless/02417_load_marks_async.sh b/tests/queries/0_stateless/02417_load_marks_async.sh
index 72b35a565df..950656e7ab6 100755
--- a/tests/queries/0_stateless/02417_load_marks_async.sh
+++ b/tests/queries/0_stateless/02417_load_marks_async.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# Tags: no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh b/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh
index 9c290133bf9..2b4b96a9cbf 100755
--- a/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh
+++ b/tests/queries/0_stateless/03148_async_queries_in_query_log_errors.sh
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+# Tags: no-parallel
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
diff --git a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql
index 652b27b8a67..ac2070fbd76 100644
--- a/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql
+++ b/tests/queries/0_stateless/03164_s3_settings_for_queries_and_merges.sql
@@ -1,4 +1,4 @@
--- Tags: no-random-settings, no-fasttest
+-- Tags: no-random-settings, no-fasttest, no-parallel
SET allow_prefetched_read_pool_for_remote_filesystem=0;
SET allow_prefetched_read_pool_for_local_filesystem=0;
diff --git a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql
index 5d473064c68..bbe701f022b 100644
--- a/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql
+++ b/tests/queries/0_stateless/03198_dynamic_read_subcolumns.sql
@@ -1,4 +1,4 @@
--- Tags: no-random-settings, no-object-storage
+-- Tags: no-random-settings, no-object-storage, no-parallel
-- Tag no-object-storage: this test relies on the number of opened files in MergeTree that can differ in object storages
SET allow_experimental_dynamic_type = 1;