Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-09 17:14:47 +00:00
Automatic style fix
This commit is contained in:
parent b46ea5572d, commit e46ec88ccb
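The commit was apparently produced by the repository's automated Python formatter (black, which the ClickHouse style check runs over the CI and test scripts), so every hunk below is a pure formatting change. The most frequent change in these hunks is dropping the redundant parentheses around tuple targets in for loops; a minimal before/after sketch with made-up names:

# Before the style fix: the parentheses around the loop targets are redundant.
pairs = {"a": 1, "b": 2}
for (key, value) in pairs.items():
    print(key, value)

# After the style fix: the formatter unwraps the tuple target.
for key, value in pairs.items():
    print(key, value)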
@@ -26,6 +26,7 @@ logging.basicConfig(
total_start_seconds = time.perf_counter()
stage_start_seconds = total_start_seconds


# Thread executor that does not hides exception that happens during function
# execution, and rethrows it after join()
class SafeThread(Thread):
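The hunk above shows only the comment and the class SafeThread(Thread): line. For context, a minimal sketch of a thread wrapper with the behavior the comment describes (capture an exception in run() and rethrow it from join(); the attribute names are assumptions, not necessarily the actual implementation):

from threading import Thread


class SafeThread(Thread):
    def __init__(self, target):
        super().__init__()
        self.target = target
        self.exception = None

    def run(self):
        try:
            self.target()
        except Exception as e:
            # Remember the exception instead of letting it die with the worker thread.
            self.exception = e

    def join(self, timeout=None):
        super().join(timeout)
        if self.exception is not None:
            raise self.exception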
@@ -158,6 +159,7 @@ for e in subst_elems:

available_parameters[name] = values


# Takes parallel lists of templates, substitutes them with all combos of
# parameters. The set of parameters is determined based on the first list.
# Note: keep the order of queries -- sometimes we have DROP IF EXISTS
@@ -670,7 +670,6 @@ if args.report == "main":
)

elif args.report == "all-queries":

print((header_template.format()))

add_tested_commits()
@@ -141,7 +141,6 @@ def prepare_tests_results_for_clickhouse(
report_url: str,
check_name: str,
) -> List[dict]:

pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master"
base_ref = "master"
head_ref = "master"
@@ -96,7 +96,6 @@ def get_images_dict(repo_path: str, image_file_path: str) -> ImagesDict:
def get_changed_docker_images(
pr_info: PRInfo, images_dict: ImagesDict
) -> Set[DockerImage]:

if not images_dict:
return set()
@@ -51,7 +51,6 @@ def find_previous_release(

for release in releases:
if release.version < server_version:

# Check if the artifact exists on GitHub.
# It can be not true for a short period of time
# after creating a tag for a new release before uploading the packages.
@@ -473,7 +473,7 @@ def create_build_html_report(
commit_url: str,
) -> str:
rows = ""
-for (build_result, build_log_url, artifact_urls) in zip(
+for build_result, build_log_url, artifact_urls in zip(
build_results, build_logs_urls, artifact_urls_list
):
row = "<tr>"
@@ -63,6 +63,7 @@ DEFAULT_ENV_NAME = ".env"

SANITIZER_SIGN = "=================="


# to create docker-compose env file
def _create_env_file(path, variables):
logging.debug(f"Env {variables} stored in {path}")
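Only the signature of _create_env_file and its debug line are visible in this hunk. A plausible sketch of such a helper, writing KEY=VALUE lines the way docker-compose expects (this behavior is an assumption, not taken from the diff):

import logging


def _create_env_file(path, variables):
    logging.debug(f"Env {variables} stored in {path}")
    with open(path, "w") as f:
        for var, value in variables.items():
            # docker-compose reads env files as simple KEY=VALUE lines.
            f.write(f"{var}={value}\n")
    return path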
@@ -1454,7 +1455,6 @@ class ClickHouseCluster:
config_root_name="clickhouse",
extra_configs=[],
) -> "ClickHouseInstance":

"""Add an instance to the cluster.

name - the name of the instance directory and the value of the 'instance' macro in ClickHouse.
@@ -3089,7 +3089,6 @@ class ClickHouseInstance:
config_root_name="clickhouse",
extra_configs=[],
):

self.name = name
self.base_cmd = cluster.base_cmd
self.docker_id = cluster.get_instance_docker_id(self.name)
@@ -216,7 +216,6 @@ class _NetworkManager:
container_exit_timeout=60,
docker_api_version=os.environ.get("DOCKER_API_VERSION"),
):

self.container_expire_timeout = container_expire_timeout
self.container_exit_timeout = container_exit_timeout
@@ -232,7 +231,6 @@ class _NetworkManager:

def _ensure_container(self):
if self._container is None or self._container_expire_time <= time.time():

for i in range(5):
if self._container is not None:
try:
@@ -1,6 +1,7 @@
import logging
import os.path


# Makes the parallel workers of pytest-xdist to log to separate files.
# Without this function all workers will log to the same log file
# and mix everything together making it much more difficult for troubleshooting.
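The comment above describes sending each pytest-xdist worker to its own log file. One common way to do that, shown here as a sketch (the function name and file naming scheme are hypothetical; pytest-xdist does set the PYTEST_XDIST_WORKER environment variable in each worker):

import logging
import os


def setup_per_worker_logging(log_dir):
    # "gw0", "gw1", ... inside xdist workers; absent in the controller process.
    worker_id = os.environ.get("PYTEST_XDIST_WORKER", "master")
    logging.basicConfig(
        filename=os.path.join(log_dir, f"pytest_{worker_id}.log"),
        level=logging.DEBUG,
    )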
@@ -24,7 +24,6 @@ def start_cluster():


def test_detach_part_wrong_partition_id(start_cluster):

# Here we create table with partition by UUID.
node_21_6.query(
"create table tab (id UUID, value UInt32) engine = MergeTree PARTITION BY (id) order by tuple()"
@@ -19,7 +19,6 @@ cluster = ClickHouseCluster(__file__)
def started_cluster():
global cluster
try:

for name in ["first", "second", "third"]:
cluster.add_instance(
name,
@@ -19,7 +19,6 @@ cluster = ClickHouseCluster(__file__)
def started_cluster():
global cluster
try:

for name in ["first_of_two", "second_of_two"]:
instance = cluster.add_instance(
name,
@@ -63,7 +63,6 @@ def netcat(hostname, port, content):


def test_connections():

client = Client(server.ip_address, 9000, command=cluster.client_bin_path)
assert client.query("SELECT 1") == "1\n"
@@ -25,7 +25,6 @@ def start_cluster():


def test_create_query_const_constraints():

instance.query("CREATE USER u_const SETTINGS max_threads = 1 CONST")
instance.query("GRANT ALL ON *.* TO u_const")
@@ -57,7 +56,6 @@ def test_create_query_const_constraints():


def test_create_query_minmax_constraints():

instance.query("CREATE USER u_minmax SETTINGS max_threads = 4 MIN 2 MAX 6")
instance.query("GRANT ALL ON *.* TO u_minmax")
@@ -348,7 +348,6 @@ class RangedLayoutTester(BaseLayoutTester):
self.layouts = LAYOUTS_RANGED

def execute(self, layout_name, node):

if layout_name not in self.layout_to_dictionary:
raise RuntimeError("Source doesn't support layout: {}".format(layout_name))
@@ -7,7 +7,6 @@ import pytest
def started_cluster():
global cluster
try:

cluster = ClickHouseCluster(__file__)
cluster.add_instance(
"disks_app_test", main_configs=["config.xml"], with_minio=True
@@ -10,6 +10,7 @@ from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)


# By default the exceptions that was throwed in threads will be ignored
# (they will not mark the test as failed, only printed to stderr).
#
@@ -18,7 +18,6 @@ def started_cluster():


def test_huge_column(started_cluster):

if (
node.is_built_with_thread_sanitizer()
or node.is_built_with_memory_sanitizer()
@@ -13,7 +13,6 @@ number_of_iterations = 100


def perform_request():

buffer = BytesIO()
crl = pycurl.Curl()
crl.setopt(pycurl.INTERFACE, client_ip)
@@ -45,7 +45,6 @@ def start_cluster():


def check_balance(node, table):

partitions = node.query(
"""
WITH
@@ -10,6 +10,7 @@ node1 = cluster.add_instance(
"node1", main_configs=["configs/keeper.xml"], stay_alive=True
)


# test that server is able to start
@pytest.fixture(scope="module")
def started_cluster():
@@ -546,7 +546,6 @@ def test_random_requests(started_cluster):


def test_end_of_session(started_cluster):

fake_zk1 = None
fake_zk2 = None
genuine_zk1 = None
@@ -685,6 +684,7 @@ def test_concurrent_watches(started_cluster):
nonlocal watches_created
nonlocal all_paths_created
fake_zk.ensure_path(global_path + "/" + str(i))

# new function each time
def dumb_watch(event):
nonlocal dumb_watch_triggered_counter
@@ -163,7 +163,6 @@ def test_state_duplicate_restart(started_cluster):

# http://zookeeper-user.578899.n2.nabble.com/Why-are-ephemeral-nodes-written-to-disk-tp7583403p7583418.html
def test_ephemeral_after_restart(started_cluster):

try:
node_zk = None
node_zk2 = None
@@ -114,7 +114,6 @@ def start_clickhouse():


def copy_zookeeper_data(make_zk_snapshots):

if make_zk_snapshots:  # force zookeeper to create snapshot
generate_zk_snapshot()
else:
@@ -148,17 +148,17 @@ def test_merge_tree_load_parts_corrupted(started_cluster):
node1.query("SYSTEM WAIT LOADING PARTS mt_load_parts_2")

def check_parts_loading(node, partition, loaded, failed, skipped):
-for (min_block, max_block) in loaded:
+for min_block, max_block in loaded:
part_name = f"{partition}_{min_block}_{max_block}"
assert node.contains_in_log(f"Loading Active part {part_name}")
assert node.contains_in_log(f"Finished loading Active part {part_name}")

-for (min_block, max_block) in failed:
+for min_block, max_block in failed:
part_name = f"{partition}_{min_block}_{max_block}"
assert node.contains_in_log(f"Loading Active part {part_name}")
assert not node.contains_in_log(f"Finished loading Active part {part_name}")

-for (min_block, max_block) in skipped:
+for min_block, max_block in skipped:
part_name = f"{partition}_{min_block}_{max_block}"
assert not node.contains_in_log(f"Loading Active part {part_name}")
assert not node.contains_in_log(f"Finished loading Active part {part_name}")
@@ -42,7 +42,6 @@ def delete(_bucket):

@route("/<_bucket>/<_path:path>", ["GET", "POST", "PUT", "DELETE"])
def server(_bucket, _path):

# It's delete query for failed part
if _path.endswith("delete"):
response.set_header("Location", "http://minio1:9001/" + _bucket + "/" + _path)
@@ -20,7 +20,6 @@ def start_cluster():


def test_merge_tree_settings_constraints():

assert "Setting storage_policy should not be changed" in instance.query_and_get_error(
f"CREATE TABLE wrong_table (number Int64) engine = MergeTree() ORDER BY number SETTINGS storage_policy = 'secret_policy'"
)
@@ -63,7 +63,6 @@ def test_part_finally_removed(started_cluster):
)

for i in range(60):

if (
node1.query(
"SELECT count() from system.parts WHERE table = 'drop_outdated_part'"
@@ -528,7 +528,9 @@ def test_make_clone_in_detached(started_cluster):
["cp", "-r", path + "all_0_0_0", path + "detached/broken_all_0_0_0"]
)
assert_eq_with_retry(instance, "select * from clone_in_detached", "\n")
-assert ["broken_all_0_0_0",] == sorted(
+assert [
+"broken_all_0_0_0",
+] == sorted(
instance.exec_in_container(["ls", path + "detached/"]).strip().split("\n")
)
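The assert reformatting above looks like black's "magic trailing comma" rule: once a collection literal contains a trailing comma, the formatter keeps one element per line instead of collapsing the brackets. A tiny sketch of the rule on a made-up list:

# With a trailing comma, black keeps the list exploded even though it fits on one line.
expected = [
    "broken_all_0_0_0",
]

# Without the trailing comma it would be collapsed back:
expected = ["broken_all_0_0_0"]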
@@ -17,7 +17,6 @@ def start_cluster():


def test_complexity_rules(start_cluster):

error_message = "DB::Exception: Invalid password. The password should: be at least 12 characters long, contain at least 1 numeric character, contain at least 1 lowercase character, contain at least 1 uppercase character, contain at least 1 special character"
assert error_message in node.query_and_get_error(
"CREATE USER u_1 IDENTIFIED WITH plaintext_password BY ''"
@@ -49,7 +49,6 @@ def start_cluster():


def test_restart_zookeeper(start_cluster):

for table_id in range(NUM_TABLES):
node1.query(
f"INSERT INTO test_table_{table_id} VALUES (1), (2), (3), (4), (5);"
@@ -20,7 +20,6 @@ def start_cluster():


def test_reload_auxiliary_zookeepers(start_cluster):

node.query(
"CREATE TABLE simple (date Date, id UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', 'node') ORDER BY tuple() PARTITION BY date;"
)
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from bottle import request, route, run, response


# Handle for MultipleObjectsDelete.
@route("/<_bucket>", ["POST"])
def delete(_bucket):
@@ -5,6 +5,7 @@ import time
import pytest
from helpers.cluster import ClickHouseCluster


# Runs simple proxy resolver in python env container.
def run_resolver(cluster):
container_id = cluster.get_container_id("resolver")
@@ -87,7 +87,6 @@ config = """<clickhouse>


def execute_query_native(node, query, user, cert_name):

config_path = f"{SCRIPT_DIR}/configs/client.xml"

formatted = config.format(
@@ -21,7 +21,6 @@ _builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, "clickhouse_path.format_schemas.kafka_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
_KEYVALUEPAIR._serialized_start = 46
_KEYVALUEPAIR._serialized_end = 88
@@ -21,7 +21,6 @@ _builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, "clickhouse_path.format_schemas.message_with_repeated_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b"H\001"
_MESSAGE._serialized_start = 62
@@ -21,7 +21,6 @@ _builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, "clickhouse_path.format_schemas.social_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
_USER._serialized_start = 47
_USER._serialized_end = 90
@@ -121,7 +121,7 @@ def kafka_create_topic(

def kafka_delete_topic(admin_client, topic, max_retries=50):
result = admin_client.delete_topics([topic])
-for (topic, e) in result.topic_error_codes:
+for topic, e in result.topic_error_codes:
if e == 0:
logging.debug(f"Topic {topic} deleted")
else:
@@ -917,9 +917,7 @@ def describe_consumer_group(kafka_cluster, name):
member_info["client_id"] = client_id
member_info["client_host"] = client_host
member_topics_assignment = []
-for (topic, partitions) in MemberAssignment.decode(
-member_assignment
-).assignment:
+for topic, partitions in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({"topic": topic, "partitions": partitions})
member_info["assignment"] = member_topics_assignment
res.append(member_info)
@@ -1537,7 +1535,6 @@ def test_kafka_protobuf_no_delimiter(kafka_cluster):


def test_kafka_materialized_view(kafka_cluster):

instance.query(
"""
DROP TABLE IF EXISTS test.view;
@@ -2315,7 +2312,6 @@ def test_kafka_virtual_columns2(kafka_cluster):


def test_kafka_produce_key_timestamp(kafka_cluster):

admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
@@ -2444,7 +2440,6 @@ def test_kafka_insert_avro(kafka_cluster):


def test_kafka_produce_consume_avro(kafka_cluster):

admin_client = KafkaAdminClient(
bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port)
)
@@ -4031,7 +4026,6 @@ def test_kafka_predefined_configuration(kafka_cluster):

# https://github.com/ClickHouse/ClickHouse/issues/26643
def test_issue26643(kafka_cluster):

# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(
@@ -4313,7 +4307,6 @@ def test_row_based_formats(kafka_cluster):
"RowBinaryWithNamesAndTypes",
"MsgPack",
]:

print(format_name)

kafka_create_topic(admin_client, format_name)
@@ -4438,7 +4431,6 @@ def test_block_based_formats_2(kafka_cluster):
"ORC",
"JSONCompactColumns",
]:

kafka_create_topic(admin_client, format_name)

instance.query(
@@ -31,7 +31,6 @@ ProtoKeyValue = _reflection.GeneratedProtocolMessageType(
_sym_db.RegisterMessage(ProtoKeyValue)

if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
_PROTOKEYVALUE._serialized_start = 45
_PROTOKEYVALUE._serialized_end = 88
@@ -706,7 +706,6 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster):


def test_abrupt_server_restart_while_heavy_replication(started_cluster):

# FIXME (kssenii) temporary disabled
if instance.is_built_with_sanitizer():
pytest.skip("Temporary disabled (FIXME)")
@@ -21,7 +21,6 @@ _builder.BuildTopDescriptorsAndMessages(
DESCRIPTOR, "clickhouse_path.format_schemas.rabbitmq_pb2", globals()
)
if _descriptor._USE_C_DESCRIPTORS == False:

DESCRIPTOR._options = None
_KEYVALUEPROTO._serialized_start = 49
_KEYVALUEPROTO._serialized_end = 92
@@ -2864,7 +2864,6 @@ def test_rabbitmq_predefined_configuration(rabbitmq_cluster):


def test_rabbitmq_msgpack(rabbitmq_cluster):

instance.query(
"""
drop table if exists rabbit_in;
@@ -2908,7 +2907,6 @@ def test_rabbitmq_msgpack(rabbitmq_cluster):


def test_rabbitmq_address(rabbitmq_cluster):

instance2.query(
"""
drop table if exists rabbit_in;
@@ -3243,7 +3241,6 @@ def test_block_based_formats_2(rabbitmq_cluster):
"ORC",
"JSONCompactColumns",
]:

print(format_name)

instance.query(
@@ -18,6 +18,7 @@ MINIO_INTERNAL_PORT = 9001

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
@@ -11,6 +11,7 @@ MINIO_INTERNAL_PORT = 9001

SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))


# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
@@ -171,7 +171,6 @@ def test_mutation_simple(started_cluster, replicated):
starting_block = 0 if replicated else 1

try:

for node in nodes:
node.query(
f"create table {name} (a Int64) engine={engine} order by tuple()"
@@ -1863,7 +1863,7 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type):
)
)

-for (node, policy) in zip(
+for node, policy in zip(
[node1, node2], ["only_jbod_1", "small_jbod_with_external"]
):
node.query(
@@ -16,7 +16,6 @@ cluster = ClickHouseCluster(__file__)
@pytest.fixture(scope="module")
def started_cluster():
try:

cluster.add_instance(
"node1",
main_configs=["configs/storage_conf.xml"],
@@ -20,6 +20,7 @@ parser.add_argument(
)
args = parser.parse_args()


# This function mirrors the PR description checks in ClickhousePullRequestTrigger.
# Returns False if the PR should not be mentioned changelog.
def parse_one_pull_request(item):
@@ -166,7 +166,7 @@ def main(args):
keeper_bench_path = args.keeper_bench_path

keepers = []
-for (port, server_id) in zip(PORTS, SERVER_IDS):
+for port, server_id in zip(PORTS, SERVER_IDS):
keepers.append(
Keeper(
keeper_binary_path, server_id, port, workdir, args.with_thread_fuzzer