Commit ad2376c03b ("wip"), parent 14c14dcd00
Mirror of https://github.com/ClickHouse/ClickHouse.git
@@ -87,14 +87,16 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti

 def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={}):
     exception = None
-    for _ in range(retry_count):
+    for cnt in range(retry_count):
         try:
-            instance.query(query, timeout=30, settings=settings)
+            res = instance.query(query, timeout=30, settings=settings)
+            if not silent:
+                logging.debug(f"Result of {query} on {cnt} try is {res}")
             break
         except Exception as ex:
             exception = ex
             if not silent:
-                logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry")
+                logging.exception(f"Failed to execute query '{query}' on {cnt} try on instance '{instance.name}' will retry")
             time.sleep(sleep_time)
     else:
         raise exception
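Note: exec_query_with_retry relies on Python's for/else: the else clause runs only when the
loop finishes without hitting break, i.e. when every attempt failed, so the saved exception
is re-raised exactly once. A minimal standalone sketch of the same pattern (the retry helper
below is illustrative, not part of the repository):

    import logging
    import time

    def retry(fn, retry_count=40, sleep_time=0.5):
        exception = None
        for cnt in range(retry_count):
            try:
                result = fn()
                break  # success: the for's else clause is skipped
            except Exception as ex:
                exception = ex
                logging.exception(f"Attempt {cnt} failed, will retry")
                time.sleep(sleep_time)
        else:
            raise exception  # reached only when no attempt broke out of the loop
        return result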
@@ -155,7 +155,7 @@ class Task2:
         assert TSV(self.cluster.instances['s0_0_0'].query("SELECT count() FROM cluster(cluster0, default, a)")) == TSV(
             "85\n")
         assert TSV(self.cluster.instances['s1_0_0'].query(
-            "SELECT count(), uniqExact(date) FROM cluster(cluster1, default, b)")) == TSV("85\t85\n")
+            "SELECT count(), uniqExact(date) FROM cluster(cluster1, default, b)")) == TSV("test_copy_month_to_week_partition_with_recovering\t85\n")

         assert TSV(self.cluster.instances['s1_0_0'].query(
             "SELECT DISTINCT jumpConsistentHash(intHash64(d), 2) FROM b")) == TSV("0\n")
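Note: the new expected value replaces a count ("85") with a test name string, which
count()/uniqExact() can never return; in a commit titled "wip" this looks like a deliberate
temporary change to make the assertion fail.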
@@ -1,7 +1,8 @@
 import random
 import string

+import logging
 import pytest
 import time
 from helpers.cluster import ClickHouseCluster

 cluster = ClickHouseCluster(__file__)
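Note: the added import logging backs the logging.debug calls introduced further down in this
file.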
@@ -130,6 +131,9 @@ def test_default_codec_single(start_cluster):
     assert node1.query("SELECT COUNT() FROM compression_table") == "3\n"
     assert node2.query("SELECT COUNT() FROM compression_table") == "3\n"

+    node1.query("DROP TABLE compression_table SYNC")
+    node2.query("DROP TABLE compression_table SYNC")
+

 def test_default_codec_multiple(start_cluster):
     for i, node in enumerate([node1, node2]):
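Note: the tests in this module share node1 and node2, so each test now drops its table when
it finishes. The SYNC modifier makes DROP TABLE wait until the table is actually removed
instead of deferring the cleanup, so a table re-created under the same name by a later test
cannot collide with a half-dropped one.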
@@ -199,6 +203,9 @@ def test_default_codec_multiple(start_cluster):
     assert node1.query("SELECT COUNT() FROM compression_table_multiple") == "3\n"
     assert node2.query("SELECT COUNT() FROM compression_table_multiple") == "3\n"

+    node1.query("DROP TABLE compression_table_multiple SYNC")
+    node2.query("DROP TABLE compression_table_multiple SYNC")
+

 def test_default_codec_version_update(start_cluster):
     node3.query("""
@@ -212,8 +219,10 @@ def test_default_codec_version_update(start_cluster):
     node3.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048)))
     node3.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048)))

+    old_version = node3.query("SELECT version()")
     node3.restart_with_latest_version()
+    new_version = node3.query("SELECT version()")
+    logging.debug(f"Updated from {old_version} to {new_version}")

     assert node3.query(
         "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'") == "ZSTD(1)\n"
     assert node3.query(
@@ -230,6 +239,16 @@ def test_default_codec_version_update(start_cluster):
     assert node3.query(
         "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'") == "LZ4\n"

+    node3.query("DROP TABLE compression_table SYNC")
+
+    def callback(n):
+        n.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system '], user='root')
+    node3.restart_with_original_version(callback_onstop=callback)
+
+    cur_version = node3.query("SELECT version()")
+    logging.debug(f"End with {cur_version}")


 def test_default_codec_for_compact_parts(start_cluster):
     node4.query("""
         CREATE TABLE compact_parts_table (
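Note: restart_with_original_version downgrades the node back to the binary it started with;
the callback_onstop hook runs while the server is stopped and wipes
/var/lib/clickhouse/metadata/system and /var/lib/clickhouse/data/system, presumably because
system tables written by the newer version may not load under the older one.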
@@ -254,3 +273,4 @@ def test_default_codec_for_compact_parts(start_cluster):
     node4.query("ATTACH TABLE compact_parts_table")

     assert node4.query("SELECT COUNT() FROM compact_parts_table") == "1\n"
+    node4.query("DROP TABLE compact_parts_table SYNC")
tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore (vendored, new file, +2)
@@ -0,0 +1,2 @@
+dictionaries/*
+!.gitignore
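Note: this is the common two-line .gitignore idiom for generated files: dictionaries/*
ignores the dictionary configs the tests generate, and !.gitignore keeps the ignore file
itself tracked so the configs directory survives in the repository.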
@@ -4,7 +4,6 @@ import time
 import pytest
 from helpers.cluster import ClickHouseCluster
 from helpers.cluster import ClickHouseKiller
 from helpers.network import PartitionManager

 cluster = ClickHouseCluster(__file__)
@@ -51,7 +51,7 @@ def test_default_reading(started_cluster):
     test_helper()

     with PartitionManager() as pm, ClickHouseKiller(dictionary_node):
-        assert None == dictionary_node.get_process_pid("clickhouse"), "CLickHouse must be alive"
+        assert None == dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive"

         # Remove connection between main_node and dictionary for sure
         pm.heal_all()
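Note: inside the ClickHouseKiller context the server on dictionary_node has been killed, so
get_process_pid returning None is the expected state; the hunk only fixes the "CLickHouse"
typo, while the message itself arguably should say the opposite of "must be alive".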
@@ -1,7 +1,6 @@
 import os
 import pwd
 import re
 import logging
 import pytest
 from helpers.cluster import ClickHouseCluster, CLICKHOUSE_START_COMMAND
@@ -37,6 +37,7 @@ def started_cluster():
         cluster.shutdown()


 def test_create_replicated_table(started_cluster):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
     assert "Explicit zookeeper_path and replica_name are specified" in \
         main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "
                                       "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);")
@@ -57,9 +58,11 @@ def test_create_replicated_table(started_cluster):
     assert_create_query([main_node, dummy_node], "testdb.replicated_table", expected)
     # assert without replacing uuid
     assert main_node.query("show create testdb.replicated_table") == dummy_node.query("show create testdb.replicated_table")
+    main_node.query("DROP DATABASE testdb SYNC")

 @pytest.mark.parametrize("engine", ['MergeTree', 'ReplicatedMergeTree'])
 def test_simple_alter_table(started_cluster, engine):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
     # test_simple_alter_table
     name = "testdb.alter_test_{}".format(engine)
     main_node.query("CREATE TABLE {} "
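Note: this hunk establishes the pattern repeated through the rest of the file: each test now
creates its own Replicated database up front and drops it at the end, so tests no longer
depend on state left behind by earlier ones. The same setup/teardown could be expressed once
as a function-scoped pytest fixture; a hypothetical sketch (not part of this commit),
reusing the module's main_node and dummy_node globals:

    import pytest

    @pytest.fixture
    def replicated_testdb(started_cluster):
        # Set up: create the database on both replicas before the test body runs.
        main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
        dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
        yield
        # Tear down: drop it afterwards, even if the test body raised.
        main_node.query("DROP DATABASE testdb SYNC")
        dummy_node.query("DROP DATABASE testdb SYNC")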
@@ -100,12 +103,11 @@ def test_simple_alter_table(started_cluster, engine):
                "SETTINGS index_granularity = 8192".format(name, full_engine)

     assert_create_query([main_node, dummy_node, competing_node], name, expected)
+    main_node.query("DROP DATABASE testdb SYNC")

 def get_table_uuid(database, name):
     return main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip()


 @pytest.fixture(scope="module", name="attachable_part")
 def fixture_attachable_part(started_cluster):
     main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic")
@@ -115,10 +117,11 @@ def fixture_attachable_part(started_cluster):
     table_uuid = get_table_uuid("testdb_attach_atomic", "test")
     return os.path.join(main_node.path, f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0")


 @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
 def test_alter_attach(started_cluster, attachable_part, engine):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     name = "alter_attach_test_{}".format(engine)
     main_node.query(f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")
     table_uuid = get_table_uuid("testdb", name)
@@ -134,10 +137,14 @@ def test_alter_attach(started_cluster, attachable_part, engine):
         assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "123\n"
     else:
         assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == ""
+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
 def test_alter_drop_part(started_cluster, engine):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     table = f"alter_drop_{engine}"
     part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
     main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")
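Note: the part_name switch reflects how block numbers are allocated: ReplicatedMergeTree
takes them from ZooKeeper starting at 0 (first part all_0_0_0), while a plain MergeTree
table starts counting at 1 (first part all_1_1_0).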
@@ -151,10 +158,14 @@ def test_alter_drop_part(started_cluster, engine):
         assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == ""
     else:
         assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "456\n"
+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
 def test_alter_detach_part(started_cluster, engine):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     table = f"alter_detach_{engine}"
     part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
     main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")
@@ -169,10 +180,14 @@ def test_alter_detach_part(started_cluster, engine):
         assert dummy_node.query(detached_parts_query) == f"{part_name}\n"
     else:
         assert dummy_node.query(detached_parts_query) == ""
+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"])
 def test_alter_drop_detached_part(started_cluster, engine):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     table = f"alter_drop_detached_{engine}"
     part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0"
     main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)")
@@ -186,8 +201,14 @@ def test_alter_drop_detached_part(started_cluster, engine):
     assert main_node.query(detached_parts_query) == ""
     assert dummy_node.query(detached_parts_query) == f"{part_name}\n"

+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")


 def test_alter_fetch(started_cluster):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     main_node.query("CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)")
     main_node.query("CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)")
     main_node.query("INSERT INTO testdb.fetch_source VALUES (123)")
@@ -197,8 +218,13 @@ def test_alter_fetch(started_cluster):
     assert main_node.query(detached_parts_query) == "all_0_0_0\n"
     assert dummy_node.query(detached_parts_query) == ""

+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 def test_alters_from_different_replicas(started_cluster):
+    main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
+
     # test_alters_from_different_replicas
     competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');")
@@ -294,6 +320,8 @@ def test_alters_from_different_replicas(started_cluster):
                "9\t2021-02-11\t1241149650\n"

     assert_eq_with_retry(dummy_node, "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", expected)
+    main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 def test_recover_staled_replica(started_cluster):
     main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');")
@@ -377,10 +405,10 @@ def test_recover_staled_replica(started_cluster):
     dummy_node.query("DROP TABLE recover.tmp")
     assert_eq_with_retry(main_node, "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'", "0\n")
+    main_node.query("DROP DATABASE recover SYNC")
+    dummy_node.query("DROP DATABASE recover SYNC")

 def test_startup_without_zk(started_cluster):
-    main_node.query("DROP DATABASE IF EXISTS testdb SYNC")
-    main_node.query("DROP DATABASE IF EXISTS recover SYNC")
     with PartitionManager() as pm:
         pm.drop_instance_zk_connections(main_node)
         err = main_node.query_and_get_error("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');")
@@ -403,7 +431,7 @@ def test_startup_without_zk(started_cluster):
     main_node.query("EXCHANGE TABLES startup.rmt AND startup.m")
     assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n"
+    main_node.query("DROP DATABASE startup")

 def test_server_uuid(started_cluster):
     uuid1 = main_node.query("select serverUUID()")
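Note: SELECT (*,).1 packs every column of startup.m into a tuple and takes its first element
(tuple indexing in ClickHouse is 1-based), a compact way to read the single column whatever
its name is after the EXCHANGE TABLES swap.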
@@ -4,7 +4,6 @@ import pytest
 from helpers.cluster import ClickHouseCluster
 from helpers.cluster import ClickHouseKiller
 from helpers.test_tools import assert_eq_with_retry
 from helpers.network import PartitionManager

 def fill_nodes(nodes):
     for node in nodes: