Fix all problems in tests that had been found by flake8

Signed-off-by: Azat Khuzhin <a.khuzhin@semrush.com>
Azat Khuzhin 2023-12-29 15:02:11 +01:00
parent 9b7944e3a5
commit 69d23f5e67
19 changed files with 28 additions and 37 deletions

View File

@@ -110,10 +110,9 @@ class HDFSApi(object):
logging.debug(
"Stdout:\n{}\n".format(res.stdout.decode("utf-8"))
)
logging.debug("Env:\n{}\n".format(env))
raise Exception(
"Command {} return non-zero code {}: {}".format(
args, res.returncode, res.stderr.decode("utf-8")
cmd, res.returncode, res.stderr.decode("utf-8")
)
)
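The `args` → `cmd` rename is a typical pyflakes F821 ("undefined name") fix: the stale name sits in an error path, so it would only surface as a NameError once a command actually fails. A minimal sketch of the corrected pattern, with hypothetical helper and variable names:

```python
import subprocess

def run_cmd(cmd):
    # pyflakes (F821) catches a stale name like `args` here statically; at
    # runtime it would only blow up, as a NameError, once the command fails.
    res = subprocess.run(cmd, capture_output=True)
    if res.returncode != 0:
        raise Exception(
            "Command {} return non-zero code {}: {}".format(
                cmd, res.returncode, res.stderr.decode("utf-8")
            )
        )
    return res.stdout.decode("utf-8")
```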

View File

@@ -1474,7 +1474,7 @@ def test_backup_all(exclude_system_log_tables):
restore_settings = []
if not exclude_system_log_tables:
restore_settings.append("allow_non_empty_tables=true")
restore_command = f"RESTORE ALL FROM {backup_name} {'SETTINGS '+ ', '.join(restore_settings) if restore_settings else ''}"
restore_command = f"RESTORE ALL FROM {backup_name} {'SETTINGS ' + ', '.join(restore_settings) if restore_settings else ''}"
session_id = new_session_id()
instance.http_query(

View File

@@ -161,13 +161,13 @@ def wait_for_fail_restore(node, restore_id):
elif status == "RESTORING":
assert_eq_with_retry(
node,
f"SELECT status FROM system.backups WHERE id = '{backup_id}'",
f"SELECT status FROM system.backups WHERE id = '{restore_id}'",
"RESTORE_FAILED",
sleep_time=2,
retry_count=50,
)
error = node.query(
f"SELECT error FROM system.backups WHERE id == '{backup_id}'"
f"SELECT error FROM system.backups WHERE id == '{restore_id}'"
).rstrip("\n")
assert re.search(
"Cannot restore the table default.tbl because it already contains some data",

View File

@@ -187,7 +187,7 @@ def check_convert_all_dbs_to_atomic():
# 6 tables, MVs contain 2 rows (inner tables does not match regexp)
assert "8\t{}\n".format(8 * len("atomic")) == node.query(
"SELECT count(), sum(n) FROM atomic.merge".format(db)
"SELECT count(), sum(n) FROM atomic.merge"
)
node.query("DETACH TABLE ordinary.detached PERMANENTLY")

View File

@@ -89,7 +89,7 @@ def test_aggregate_states(start_cluster):
logging.info("Skipping %s", aggregate_function)
skipped += 1
continue
logging.exception("Failed %s", function)
logging.exception("Failed %s", aggregate_function)
failed += 1
continue

View File

@@ -116,7 +116,7 @@ def test_usage(cluster, node_name):
(id Int32) ENGINE = MergeTree() ORDER BY id
SETTINGS storage_policy = 'web';
""".format(
i, uuids[i], i, i
i, uuids[i]
)
)
@@ -338,7 +338,7 @@ def test_page_cache(cluster):
(id Int32) ENGINE = MergeTree() ORDER BY id
SETTINGS storage_policy = 'web';
""".format(
i, uuids[i], i, i
i, uuids[i]
)
)
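Both hunks drop `.format()` arguments that have no matching placeholder in the SQL template; extra positional arguments are silently ignored at runtime, which is what pyflakes reports as F523 (unused positional arguments). An illustrative sketch, not the original query:

```python
template = "ATTACH TABLE test_{} UUID '{}'"  # two placeholders

# Correct: two arguments for two placeholders.
query = template.format(0, "0000-uuid-0000")

# What the old code did: the trailing arguments are ignored at runtime,
# but flake8/pyflakes flags them as unused positional arguments.
query = template.format(0, "0000-uuid-0000", 0, 0)
```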

View File

@@ -90,7 +90,7 @@ def wait_until_fully_merged(node, table):
except:
return
raise Exception(f"There are still merges on-going after {retry} assignments")
raise Exception(f"There are still merges on-going after {i} assignments")
def test_jbod_balanced_merge(start_cluster):

View File

@@ -91,7 +91,7 @@ def test_jdbc_insert(started_cluster):
"""
CREATE TABLE test.test_insert ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable;
SELECT *
SELECT *
FROM jdbc('{0}?mutation', 'INSERT INTO test.test_insert VALUES({1}, ''{1}'', ''{1}'')');
""".format(
datasource, records
@@ -115,7 +115,7 @@ def test_jdbc_update(started_cluster):
"""
CREATE TABLE test.test_update ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable;
SELECT *
SELECT *
FROM jdbc(
'{}?mutation',
'SET mutations_sync = 1; ALTER TABLE test.test_update UPDATE Str=''{}'' WHERE Num = {} - 1;'
@@ -145,7 +145,7 @@ def test_jdbc_delete(started_cluster):
"""
CREATE TABLE test.test_delete ENGINE = Memory AS
SELECT * FROM test.ClickHouseTable;
SELECT *
SELECT *
FROM jdbc(
'{}?mutation',
'SET mutations_sync = 1; ALTER TABLE test.test_delete DELETE WHERE Num < {} - 1;'
@@ -158,7 +158,7 @@ def test_jdbc_delete(started_cluster):
expected = records - 1
actual = instance.query(
"SELECT Str FROM jdbc('{}', 'SELECT * FROM test.test_delete')".format(
datasource, records
datasource
)
)
assert int(actual) == expected, "expecting {} but got {}".format(expected, actual)

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python3
##!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
import helpers.keeper_utils as keeper_utils

View File

@@ -1,6 +1,5 @@
#!/usr/bin/env python3
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
import helpers.keeper_utils as keeper_utils

View File

@@ -1,6 +1,5 @@
#!/usr/bin/env python3
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
import random

View File

@@ -537,10 +537,7 @@ def test_freeze_unfreeze(cluster):
def test_apply_new_settings(cluster):
node = cluster.instances[NODE_NAME]
create_table(node, TABLE_NAME)
config_path = os.path.join(
SCRIPT_DIR,
"./_gen/disk_storage_conf.xml".format(cluster.instances_dir_name),
)
config_path = os.path.join(SCRIPT_DIR, "./_gen/disk_storage_conf.xml")
azure_query(
node, f"INSERT INTO {TABLE_NAME} VALUES {generate_values('2020-01-03', 4096)}"

View File

@@ -179,9 +179,7 @@ def test_different_data_types(started_cluster):
for i in range(10):
col = random.choice(["a", "b", "c"])
cursor.execute("UPDATE test_data_types SET {} = {};".format(col, i))
cursor.execute(
"""UPDATE test_data_types SET i = '2020-12-12';""".format(col, i)
)
cursor.execute("UPDATE test_data_types SET i = '2020-12-12';")
check_tables_are_synchronized(instance, "test_data_types", "id")
@@ -452,7 +450,7 @@ def test_many_concurrent_queries(started_cluster):
# also change primary key value
print("try update primary key {}".format(thread_id))
cursor.execute(
"UPDATE {table}_{} SET key=key%100000+100000*{} WHERE key%{}=0".format(
"UPDATE {} SET key=key%100000+100000*{} WHERE key%{}=0".format(
table_name, i + 1, i + 1
)
)

View File

@@ -6,6 +6,7 @@ import time
import threading
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)

View File

@@ -4,7 +4,7 @@ import os
import json
import helpers.client
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
from helpers.test_tools import TSV
from helpers.s3_tools import prepare_s3_bucket, upload_directory, get_file_contents
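Adding `ClickHouseInstance` to the import presumably satisfies a name that is only referenced in type annotations elsewhere in the file; pyflakes reports such references as F821 undefined names. A sketch of the pattern, with a hypothetical helper:

```python
from helpers.cluster import ClickHouseCluster, ClickHouseInstance

# Hypothetical helper: the annotation is the only use of ClickHouseInstance,
# but without the import it is still an undefined name for pyflakes (F821).
def get_node(cluster: ClickHouseCluster) -> ClickHouseInstance:
    return cluster.instances["node1"]
```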

View File

@@ -1,5 +1,5 @@
import helpers.client
from helpers.cluster import ClickHouseCluster
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
from helpers.test_tools import TSV
import pyspark

View File

@@ -702,7 +702,7 @@ def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result)
), "ClickHouse lost some messages: {}".format(result1)
assert int(result2) == 10
@@ -1516,7 +1516,7 @@ def test_rabbitmq_hash_exchange(rabbitmq_cluster):
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result)
), "ClickHouse lost some messages: {}".format(result1)
assert int(result2) == 4 * num_tables
@@ -1966,7 +1966,7 @@ def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster):
assert (
int(result1) == messages_num * threads_num
), "ClickHouse lost some messages: {}".format(result)
), "ClickHouse lost some messages: {}".format(result1)
# 4 tables, 2 consumers for each table => 8 consumer tags
assert int(result2) == 8
@@ -2427,9 +2427,7 @@ def test_rabbitmq_drop_table_properly(rabbitmq_cluster):
time.sleep(30)
try:
exists = channel.queue_declare(
callback, queue="rabbit_queue_drop", passive=True
)
exists = channel.queue_declare(queue="rabbit_queue_drop", passive=True)
except Exception as e:
exists = False
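The stray `callback` argument is dropped here; with pika's blocking API, a passive `queue_declare` is the usual way to check whether a queue still exists. A sketch of that check, assuming a local RabbitMQ and pika's `BlockingConnection`:

```python
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
try:
    # passive=True only checks for the queue: it raises if the queue is gone
    # instead of creating it.
    channel.queue_declare(queue="rabbit_queue_drop", passive=True)
    exists = True
except pika.exceptions.ChannelClosedByBroker:
    exists = False
connection.close()
```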
@@ -3364,7 +3362,7 @@ def test_rabbitmq_flush_by_block_size(rabbitmq_cluster):
routing_key="",
body=json.dumps({"key": 0, "value": 0}),
)
except e:
except Exception as e:
logging.debug(f"Got error: {str(e)}")
produce_thread = threading.Thread(target=produce)
@@ -3442,7 +3440,7 @@ def test_rabbitmq_flush_by_time(rabbitmq_cluster):
)
logging.debug("Produced a message")
time.sleep(0.8)
except e:
except Exception as e:
logging.debug(f"Got error: {str(e)}")
produce_thread = threading.Thread(target=produce)
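`except e:` treats `e` as the class of exceptions to catch, and since no such name is bound it is both an F821 undefined name for flake8 and a NameError the first time the handler fires; `except Exception as e:` is the intended form. A minimal sketch:

```python
import logging

try:
    1 / 0
# The old `except e:` would look up an undefined name `e` here and raise
# NameError instead of handling the error; this is the corrected form.
except Exception as e:
    logging.debug(f"Got error: {str(e)}")
```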

View File

@@ -1850,7 +1850,7 @@ class TestCancelBackgroundMoving:
config = inspect.cleandoc(
f"""
<clickhouse>
<max_local_write_bandwidth_for_server>{ 256 * 1024 }</max_local_write_bandwidth_for_server>
<max_local_write_bandwidth_for_server>{256 * 1024}</max_local_write_bandwidth_for_server>
</clickhouse>
"""
)
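Only the padding inside the f-string braces changes; `{ 256 * 1024 }` and `{256 * 1024}` render identically, and the spaced form is what the linter's whitespace-inside-brackets checks object to on toolchains that tokenize f-strings (Python 3.12+). A quick check of that equivalence:

```python
limit = 256 * 1024

# Whitespace around the expression inside the braces does not change the
# rendered value; only the linter cares about it.
spaced = f"<max_local_write_bandwidth_for_server>{ limit }</max_local_write_bandwidth_for_server>"
compact = f"<max_local_write_bandwidth_for_server>{limit}</max_local_write_bandwidth_for_server>"
assert spaced == compact
```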

View File

@@ -325,7 +325,7 @@ def optimize_with_retry(node, table_name, retry=20):
settings={"optimize_throw_if_noop": "1"},
)
break
except e:
except:
time.sleep(0.5)