mirror of https://github.com/ClickHouse/ClickHouse.git
ENDPOINT_SUBPATH is unique for each instance in test_s3_plain_rewritable

parent d4af1f3464
commit b11b9515a2
@@ -9,18 +9,6 @@ cluster = ClickHouseCluster(__file__)
 
 NUM_WORKERS = 5
 
-nodes = []
-for i in range(NUM_WORKERS):
-    name = "node{}".format(i + 1)
-    node = cluster.add_instance(
-        name,
-        main_configs=["configs/storage_conf.xml"],
-        env_variables={"ENDPOINT_SUBPATH": name},
-        with_minio=True,
-        stay_alive=True,
-    )
-    nodes.append(node)
-
 MAX_ROWS = 1000
 
 
@@ -38,6 +26,17 @@ insert_values = ",".join(
 
 @pytest.fixture(scope="module", autouse=True)
 def start_cluster():
+    for i in range(NUM_WORKERS):
+        cluster.add_instance(
+            f"node{i + 1}",
+            main_configs=["configs/storage_conf.xml"],
+            with_minio=True,
+            env_variables={"ENDPOINT_SUBPATH": f"node{i + 1}"},
+            stay_alive=True,
+            # Override ENDPOINT_SUBPATH.
+            instance_env_variables=i > 0,
+        )
+
     try:
         cluster.start()
         yield cluster
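As the in-diff comment suggests, instance_env_variables=i > 0 makes the test helpers keep ENDPOINT_SUBPATH in each instance's own environment rather than letting one shared value win, so every worker's plain_rewritable disk writes under its own S3 subpath. A minimal sketch of a post-start check, assuming the helpers' exec_in_container call; this is illustrative and not part of the commit:

# Hypothetical check: every node should report its own ENDPOINT_SUBPATH value.
def assert_unique_endpoint_subpaths(cluster, num_workers):
    seen = set()
    for i in range(num_workers):
        node = cluster.instances[f"node{i + 1}"]
        # exec_in_container comes from the integration-test helpers; the exact
        # invocation here is an assumption made for illustration.
        value = node.exec_in_container(["bash", "-c", "echo -n $ENDPOINT_SUBPATH"])
        seen.add(value.strip())
    # With a unique subpath per instance, no two nodes collide in the bucket.
    assert len(seen) == num_workers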
@@ -64,10 +63,10 @@ def test_insert():
         gen_insert_values(random.randint(1, MAX_ROWS)) for _ in range(0, NUM_WORKERS)
     ]
     threads = []
+    assert len(cluster.instances) == NUM_WORKERS
     for i in range(NUM_WORKERS):
-        t = threading.Thread(
-            target=create_insert, args=(nodes[i], insert_values_arr[i])
-        )
+        node = cluster.instances[f"node{i + 1}"]
+        t = threading.Thread(target=create_insert, args=(node, insert_values_arr[i]))
         threads.append(t)
         t.start()
 
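The hunk above calls gen_insert_values and create_insert, which are defined earlier in test.py and are not part of this diff. A hypothetical sketch of what such helpers could look like, only to make the assertions readable; the real table schema, storage policy name, and row payload may differ:

import random
import string


def gen_insert_values(size):
    # Rows in Values format, e.g. "(0,'abcde'),(1,'fghij')"; this is what the
    # test later compares against SELECT ... FORMAT Values.
    return ",".join(
        f"({i},'{''.join(random.choices(string.ascii_lowercase, k=5))}')"
        for i in range(size)
    )


def create_insert(node, insert_values):
    # Assumed schema and policy name; the table lives on the node's
    # plain_rewritable S3 disk.
    node.query(
        "CREATE TABLE test (id Int64, data String) "
        "ENGINE=MergeTree() ORDER BY id "
        "SETTINGS storage_policy='s3_plain_rewritable'"
    )
    node.query("INSERT INTO test VALUES {}".format(insert_values))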
@@ -75,38 +74,34 @@ def test_insert():
         t.join()
 
     for i in range(NUM_WORKERS):
+        node = cluster.instances[f"node{i + 1}"]
         assert (
-            nodes[i].query("SELECT * FROM test ORDER BY id FORMAT Values")
+            node.query("SELECT * FROM test ORDER BY id FORMAT Values")
             == insert_values_arr[i]
         )
 
     for i in range(NUM_WORKERS):
-        nodes[i].query("ALTER TABLE test MODIFY SETTING old_parts_lifetime = 59")
+        node = cluster.instances[f"node{i + 1}"]
+        node.query("ALTER TABLE test MODIFY SETTING old_parts_lifetime = 59")
         assert (
-            nodes[i]
-            .query(
+            node.query(
                 "SELECT engine_full from system.tables WHERE database = currentDatabase() AND name = 'test'"
-            )
-            .find("old_parts_lifetime = 59")
+            ).find("old_parts_lifetime = 59")
             != -1
         )
 
-        nodes[i].query("ALTER TABLE test RESET SETTING old_parts_lifetime")
+        node.query("ALTER TABLE test RESET SETTING old_parts_lifetime")
         assert (
-            nodes[i]
-            .query(
+            node.query(
                 "SELECT engine_full from system.tables WHERE database = currentDatabase() AND name = 'test'"
-            )
-            .find("old_parts_lifetime")
+            ).find("old_parts_lifetime")
            == -1
         )
-        nodes[i].query("ALTER TABLE test MODIFY COMMENT 'new description'")
+        node.query("ALTER TABLE test MODIFY COMMENT 'new description'")
         assert (
-            nodes[i]
-            .query(
+            node.query(
                 "SELECT comment from system.tables WHERE database = currentDatabase() AND name = 'test'"
-            )
-            .find("new description")
+            ).find("new description")
             != -1
         )
 
@@ -115,8 +110,9 @@ def test_insert():
 def test_restart():
     insert_values_arr = []
     for i in range(NUM_WORKERS):
+        node = cluster.instances[f"node{i + 1}"]
         insert_values_arr.append(
-            nodes[i].query("SELECT * FROM test ORDER BY id FORMAT Values")
+            node.query("SELECT * FROM test ORDER BY id FORMAT Values")
         )
 
     def restart(node):
@@ -124,7 +120,7 @@ def test_restart():
 
     threads = []
     for i in range(NUM_WORKERS):
-        t = threading.Thread(target=restart, args=(nodes[i],))
+        t = threading.Thread(target=restart, args=(node,))
         threads.append(t)
         t.start()
 
@@ -132,8 +128,9 @@ def test_restart():
         t.join()
 
     for i in range(NUM_WORKERS):
+        node = cluster.instances[f"node{i + 1}"]
         assert (
-            nodes[i].query("SELECT * FROM test ORDER BY id FORMAT Values")
+            node.query("SELECT * FROM test ORDER BY id FORMAT Values")
             == insert_values_arr[i]
         )
 
@@ -141,7 +138,8 @@ def test_restart():
 @pytest.mark.order(2)
 def test_drop():
     for i in range(NUM_WORKERS):
-        nodes[i].query("DROP TABLE IF EXISTS test SYNC")
+        node = cluster.instances[f"node{i + 1}"]
+        node.query("DROP TABLE IF EXISTS test SYNC")
 
     it = cluster.minio_client.list_objects(
         cluster.minio_bucket, "data/", recursive=True
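The page truncates the last hunk here; presumably the listing is then consumed to assert that nothing remains under the data/ prefix once every node has dropped its table. A hedged sketch of such a check, using only the MinIO client call already visible above; the exact assertion in test.py may differ:

# Count leftover objects under the shared prefix; after DROP ... SYNC on every
# node, a plain_rewritable disk should leave no data behind.
remaining = sum(
    1
    for _ in cluster.minio_client.list_objects(
        cluster.minio_bucket, "data/", recursive=True
    )
)
assert remaining == 0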