From d62121e6d95396e991d008de74819b4abee254ea Mon Sep 17 00:00:00 2001
From: robot-clickhouse
Date: Wed, 2 Oct 2024 10:06:46 +0000
Subject: [PATCH] Backport #70159 to 24.8: Update test_storage_s3_queue/test.py

---
 tests/integration/test_storage_s3_queue/test.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py
index 8b959daba1c..66dad88ccbc 100644
--- a/tests/integration/test_storage_s3_queue/test.py
+++ b/tests/integration/test_storage_s3_queue/test.py
@@ -2,6 +2,7 @@ import io
 import logging
 import random
 import time
+import uuid
 
 import pytest
 from helpers.client import QueryRuntimeException
@@ -924,7 +925,7 @@ def test_max_set_age(started_cluster):
     ).encode()
 
     # use a different filename for each test to allow running a bunch of them sequentially with --count
-    file_with_error = f"max_set_age_fail_{uuid4().hex[:8]}.csv"
+    file_with_error = f"max_set_age_fail_{uuid.uuid4().hex[:8]}.csv"
     put_s3_file_content(started_cluster, f"{files_path}/{file_with_error}", values_csv)
 
     wait_for_condition(lambda: failed_count + 1 == get_object_storage_failures())
@@ -1814,10 +1815,10 @@ def test_commit_on_limit(started_cluster):
 def test_upgrade_2(started_cluster):
     node = started_cluster.instances["instance_24.5"]
 
-    table_name = f"test_upgrade_2_{uuid4().hex[:8]}"
+    table_name = f"test_upgrade_2_{uuid.uuid4().hex[:8]}"
     dst_table_name = f"{table_name}_dst"
     # A unique path is necessary for repeatable tests
-    keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}"
+    keeper_path = f"/clickhouse/test_{table_name}"
     files_path = f"{table_name}_data"
     files_to_generate = 10
 
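
The change is mechanical but load-bearing: a bare uuid4() call only resolves if the name was brought in with "from uuid import uuid4", so qualifying it as uuid.uuid4() and adding "import uuid" makes the calls self-contained. Below is a minimal standalone sketch of the naming pattern the patch settles on; the unique_name helper is hypothetical and added purely for illustration, while table_name, dst_table_name, and keeper_path mirror identifiers from the test code.

import uuid

def unique_name(prefix: str) -> str:
    # An 8-character hex suffix keeps names readable while making each
    # run's files, tables, and Keeper paths distinct, so the tests stay
    # repeatable when run back to back (the --count option mentioned in
    # the test's own comment).
    return f"{prefix}_{uuid.uuid4().hex[:8]}"

table_name = unique_name("test_upgrade_2")
dst_table_name = f"{table_name}_dst"
# A unique path is necessary for repeatable tests; table_name already
# carries the random suffix, which is presumably why the patch drops the
# extra generate_random_string() component from keeper_path.
keeper_path = f"/clickhouse/test_{table_name}"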