Merge remote-tracking branch 'origin' into update-h3

Commit 2b1f3d4681 by Yatsishin Ilya, 2021-06-22 17:38:59 +03:00
37 changed files with 228 additions and 134 deletions

contrib/NuRaft vendored

@@ -1 +1 @@
-Subproject commit 2a1bf7d87b4a03561fc66fbb49cee8a288983c5d
+Subproject commit 976874b7aa7f422bf4ea595bb7d1166c617b1c26

View File

@@ -148,5 +148,10 @@ toc_title: Adopters
 | <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
 | <a href="https://shop.okraina.ru/" class="favicon">ООО «МПЗ Богородский»</a> | Agriculture | — | — | — | [Article in Russian, November 2020](https://cloud.yandex.ru/cases/okraina) |
 | <a href="https://www.tesla.com/" class="favicon">Tesla</a> | Electric vehicle and clean energy company | — | — | — | [Vacancy description, March 2021](https://news.ycombinator.com/item?id=26306170) |
+| <a href="https://www.kgk-global.com/en/" class="favicon">KGK Global</a> | Vehicle monitoring | — | — | — | [Press release, June 2021](https://zoom.cnews.ru/news/item/530921) |
+| <a href="https://www.bilibili.com/" class="favicon">BiliBili</a> | Video sharing | — | — | — | [Blog post, June 2021](https://chowdera.com/2021/06/20210622012241476b.html) |
+| <a href="https://gigapipe.com/" class="favicon">Gigapipe</a> | Managed ClickHouse | Main product | — | — | [Official website](https://gigapipe.com/) |
+| <a href="https://www.hydrolix.io/" class="favicon">Hydrolix</a> | Cloud data platform | Main product | — | — | [Documentation](https://docs.hydrolix.io/guide/query) |
+| <a href="https://www.argedor.com/en/clickhouse/" class="favicon">Argedor</a> | ClickHouse support | — | — | — | [Official website](https://www.argedor.com/en/clickhouse/) |

 [Original article](https://clickhouse.tech/docs/en/introduction/adopters/) <!--hide-->

View File

@@ -23,9 +23,10 @@ using IndexToLogEntry = std::unordered_map<uint64_t, LogEntryPtr>;
 enum class ChangelogVersion : uint8_t
 {
     V0 = 0,
+    V1 = 1, /// with 64 bit buffer header
 };

-static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V0;
+static constexpr auto CURRENT_CHANGELOG_VERSION = ChangelogVersion::V1;

 struct ChangelogRecordHeader
 {

View File

@@ -204,7 +204,7 @@ SnapshotMetadataPtr KeeperStorageSnapshot::deserialize(KeeperStorage & storage,
     uint8_t version;
     readBinary(version, in);
     SnapshotVersion current_version = static_cast<SnapshotVersion>(version);
-    if (current_version > SnapshotVersion::V1)
+    if (current_version > CURRENT_SNAPSHOT_VERSION)
         throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unsupported snapshot version {}", version);

     SnapshotMetadataPtr result = deserializeSnapshotMetadata(in);

View File

@@ -14,8 +14,11 @@ enum SnapshotVersion : uint8_t
 {
     V0 = 0,
     V1 = 1, /// with ACL map
+    V2 = 2, /// with 64 bit buffer header
 };

+static constexpr auto CURRENT_SNAPSHOT_VERSION = SnapshotVersion::V2;
+
 struct KeeperStorageSnapshot
 {
 public:
@@ -30,7 +33,7 @@ public:
     KeeperStorage * storage;

-    SnapshotVersion version = SnapshotVersion::V1;
+    SnapshotVersion version = CURRENT_SNAPSHOT_VERSION;
     SnapshotMetadataPtr snapshot_meta;
     int64_t session_id;
     size_t snapshot_container_size;

View File

@@ -22,9 +22,10 @@ const char * ParserMultiplicativeExpression::operators[] =
     nullptr
 };

-const char * ParserUnaryMinusExpression::operators[] =
+const char * ParserUnaryExpression::operators[] =
 {
     "-", "negate",
+    "NOT", "not",
     nullptr
 };
@@ -539,7 +540,7 @@ bool ParserPrefixUnaryOperatorExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 }

-bool ParserUnaryMinusExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
+bool ParserUnaryExpression::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
     /// As an exception, negative numbers should be parsed as literals, and not as an application of the operator.

View File

@@ -245,14 +245,14 @@ protected:
 };

-class ParserUnaryMinusExpression : public IParserBase
+class ParserUnaryExpression : public IParserBase
 {
 private:
     static const char * operators[];
     ParserPrefixUnaryOperatorExpression operator_parser {operators, std::make_unique<ParserTupleElementExpression>()};

 protected:
-    const char * getName() const override { return "unary minus expression"; }
+    const char * getName() const override { return "unary expression"; }
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
@@ -262,7 +262,7 @@ class ParserMultiplicativeExpression : public IParserBase
 {
 private:
     static const char * operators[];
-    ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserUnaryMinusExpression>()};
+    ParserLeftAssociativeBinaryOperatorList operator_parser {operators, std::make_unique<ParserUnaryExpression>()};

 protected:
     const char * getName() const override { return "multiplicative expression"; }

View File

@@ -423,6 +423,9 @@ KeyCondition::KeyCondition(
     */
     Block block_with_constants = getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context);

+    for (const auto & [name, _] : query_info.syntax_analyzer_result->array_join_result_to_source)
+        array_joined_columns.insert(name);
+
     const ASTSelectQuery & select = query_info.query->as<ASTSelectQuery &>();
     if (select.where() || select.prewhere())
     {
@@ -610,6 +613,10 @@ bool KeyCondition::canConstantBeWrappedByMonotonicFunctions(
     DataTypePtr & out_type)
 {
     String expr_name = node->getColumnNameWithoutAlias();
+
+    if (array_joined_columns.count(expr_name))
+        return false;
+
     if (key_subexpr_names.count(expr_name) == 0)
         return false;
@@ -714,6 +721,9 @@ bool KeyCondition::canConstantBeWrappedByFunctions(
 {
     String expr_name = ast->getColumnNameWithoutAlias();

+    if (array_joined_columns.count(expr_name))
+        return false;
+
     if (key_subexpr_names.count(expr_name) == 0)
     {
         /// Let's check another one case.
@@ -1075,6 +1085,9 @@ bool KeyCondition::isKeyPossiblyWrappedByMonotonicFunctionsImpl(
     // Key columns should use canonical names for index analysis
     String name = node->getColumnNameWithoutAlias();

+    if (array_joined_columns.count(name))
+        return false;
+
     auto it = key_columns.find(name);
     if (key_columns.end() != it)
     {

View File

@@ -459,6 +459,8 @@ private:
     const ExpressionActionsPtr key_expr;
     /// All intermediate columns are used to calculate key_expr.
     const NameSet key_subexpr_names;

+    NameSet array_joined_columns;
+
     PreparedSets prepared_sets;

     // If true, always allow key_expr to be wrapped by function

View File

@@ -139,7 +139,8 @@ def configure_testcase_args(args, case_file, suite_tmp_dir, stderr_file):
     testcase_args = copy.deepcopy(args)

     testcase_args.testcase_start_time = datetime.now()
-    testcase_args.testcase_client = f"{testcase_args.client} --log_comment='{case_file}'"
+    testcase_basename = os.path.basename(case_file)
+    testcase_args.testcase_client = f"{testcase_args.client} --log_comment='{testcase_basename}'"

     if testcase_args.database:
         database = testcase_args.database

View File

@@ -377,8 +377,8 @@ class ClickhouseIntegrationTestsRunner:
         test_cmd = ' '.join([test for test in sorted(test_names)])
         parallel_cmd = " --parallel {} ".format(num_workers) if num_workers > 0 else ""
-        cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --color=no --durations=0 {}' | tee {}".format(
-            repo_path, image_cmd, test_cmd, parallel_cmd, _get_deselect_option(self.should_skip_tests()), output_path)
+        cmd = "cd {}/tests/integration && ./runner --tmpfs {} -t {} {} '-ss -rfEp --run-id={} --color=no --durations=0 {}' | tee {}".format(
+            repo_path, image_cmd, test_cmd, parallel_cmd, i, _get_deselect_option(self.should_skip_tests()), output_path)

         with open(log_path, 'w') as log:
             logging.info("Executing cmd: %s", cmd)

View File

@@ -28,4 +28,10 @@ def cleanup_environment():
         logging.exception(f"cleanup_environment:{str(e)}")
         pass

     yield
+
+def pytest_addoption(parser):
+    parser.addoption("--run-id", default="", help="run-id is used as postfix in _instances_{} directory")
+
+def pytest_configure(config):
+    os.environ['INTEGRATION_TESTS_RUN_ID'] = config.option.run_id

View File

@@ -1,6 +1,7 @@
 import os
 import subprocess as sp
 import tempfile
+import logging

 from threading import Timer
@@ -105,6 +106,7 @@ class CommandRequest:
         stderr = self.stderr_file.read().decode('utf-8', errors='replace')

         if self.timer is not None and not self.process_finished_before_timeout and not self.ignore_error:
+            logging.debug(f"Timed out. Last stdout:{stdout}, stderr:{stderr}")
             raise QueryTimeoutExceedException('Client timed out!')

         if (self.process.returncode != 0 or stderr) and not self.ignore_error:

View File

@@ -29,7 +29,6 @@ from dict2xml import dict2xml
 from kazoo.client import KazooClient
 from kazoo.exceptions import KazooException
 from minio import Minio
-from minio.deleteobjects import DeleteObject

 from helpers.test_tools import assert_eq_with_retry

 import docker
@@ -172,6 +171,13 @@ def enable_consistent_hash_plugin(rabbitmq_id):
     p.communicate()
     return p.returncode == 0

+def get_instances_dir():
+    if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']:
+        return '_instances_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID'])
+    else:
+        return '_instances'
+
 class ClickHouseCluster:
     """ClickHouse cluster with several instances and (possibly) ZooKeeper.
@@ -203,7 +209,14 @@ class ClickHouseCluster:
         project_name = pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name
         # docker-compose removes everything non-alphanumeric from project names so we do it too.
         self.project_name = re.sub(r'[^a-z0-9]', '', project_name.lower())
-        self.instances_dir = p.join(self.base_dir, '_instances' + ('' if not self.name else '_' + self.name))
+        instances_dir_name = '_instances'
+        if self.name:
+            instances_dir_name += '_' + self.name
+
+        if 'INTEGRATION_TESTS_RUN_ID' in os.environ and os.environ['INTEGRATION_TESTS_RUN_ID']:
+            instances_dir_name += '_' + shlex.quote(os.environ['INTEGRATION_TESTS_RUN_ID'])
+
+        self.instances_dir = p.join(self.base_dir, instances_dir_name)
         self.docker_logs_path = p.join(self.instances_dir, 'docker.log')
         self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME)
         self.env_variables = {}
@@ -421,7 +434,15 @@ class ClickHouseCluster:
         pass

     def get_docker_handle(self, docker_id):
-        return self.docker_client.containers.get(docker_id)
+        exception = None
+        for i in range(5):
+            try:
+                return self.docker_client.containers.get(docker_id)
+            except Exception as ex:
+                print("Got exception getting docker handle", str(ex))
+                time.sleep(i * 2)
+                exception = ex
+        raise exception

     def get_client_cmd(self):
         cmd = self.client_bin_path
@@ -577,7 +598,7 @@ class ClickHouseCluster:
         self.base_cmd.extend(['--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')])
         self.base_hdfs_cmd = ['docker-compose', '--env-file', instance.env_file, '--project-name', self.project_name,
                               '--file', p.join(docker_compose_yml_dir, 'docker_compose_hdfs.yml')]
-        print("HDFS BASE CMD:{}".format(self.base_hdfs_cmd))
+        logging.debug(f"HDFS BASE CMD:{self.base_hdfs_cmd}")
         return self.base_hdfs_cmd

     def setup_kerberized_hdfs_cmd(self, instance, env_variables, docker_compose_yml_dir):
@@ -1217,8 +1238,8 @@ class ClickHouseCluster:
         for bucket in buckets:
             if minio_client.bucket_exists(bucket):
                 delete_object_list = map(
-                    lambda x: DeleteObject(x.object_name),
-                    minio_client.list_objects(bucket, recursive=True),
+                    lambda x: x.object_name,
+                    minio_client.list_objects_v2(bucket, recursive=True),
                 )
                 errors = minio_client.remove_objects(bucket, delete_object_list)
                 for error in errors:
@@ -1468,9 +1489,9 @@ class ClickHouseCluster:
             instance.docker_client = self.docker_client
             instance.ip_address = self.get_instance_ip(instance.name)

-            logging.debug("Waiting for ClickHouse start...")
+            logging.debug(f"Waiting for ClickHouse start in {instance}, ip: {instance.ip_address}...")
             instance.wait_for_start(start_timeout)
-            logging.debug("ClickHouse started")
+            logging.debug(f"ClickHouse {instance} started")

             instance.client = Client(instance.ip_address, command=self.client_bin_path)
@@ -1864,8 +1885,7 @@ class ClickHouseInstance:
         self.start_clickhouse(stop_start_wait_sec)

     def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs):
-        container_id = self.get_docker_handle().id
-        return self.cluster.exec_in_container(container_id, cmd, detach, nothrow, **kwargs)
+        return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs)

     def contains_in_log(self, substring):
         result = self.exec_in_container(
@@ -1905,8 +1925,7 @@ class ClickHouseInstance:
             ["bash", "-c", "echo $(if [ -e '{}' ]; then echo 'yes'; else echo 'no'; fi)".format(path)]) == 'yes\n'

     def copy_file_to_container(self, local_path, dest_path):
-        container_id = self.get_docker_handle().id
-        return self.cluster.copy_file_to_container(container_id, local_path, dest_path)
+        return self.cluster.copy_file_to_container(self.docker_id, local_path, dest_path)

     def get_process_pid(self, process_name):
         output = self.exec_in_container(["bash", "-c",
@@ -1961,6 +1980,7 @@ class ClickHouseInstance:
         self.get_docker_handle().start()

     def wait_for_start(self, start_timeout=None, connection_timeout=None):
+        handle = self.get_docker_handle()
         if start_timeout is None or start_timeout <= 0:
             raise Exception("Invalid timeout: {}".format(start_timeout))
@@ -1983,11 +2003,10 @@ class ClickHouseInstance:
             return False

         while True:
-            handle = self.get_docker_handle()
+            handle.reload()
             status = handle.status
             if status == 'exited':
-                raise Exception("Instance `{}' failed to start. Container status: {}, logs: {}"
-                                .format(self.name, status, handle.logs().decode('utf-8')))
+                raise Exception(f"Instance `{self.name}' failed to start. Container status: {status}, logs: {handle.logs().decode('utf-8')}")

             deadline = start_time + start_timeout
             # It is possible that server starts slowly.
@@ -1997,9 +2016,8 @@ class ClickHouseInstance:
             current_time = time.time()
             if current_time >= deadline:
-                raise Exception("Timed out while waiting for instance `{}' with ip address {} to start. "
-                                "Container status: {}, logs: {}".format(self.name, self.ip_address, status,
-                                                                        handle.logs().decode('utf-8')))
+                raise Exception(f"Timed out while waiting for instance `{self.name}' with ip address {self.ip_address} to start. " \
+                                f"Container status: {status}, logs: {handle.logs().decode('utf-8')}")

             socket_timeout = min(start_timeout, deadline - current_time)

View File

@@ -1,5 +1,6 @@
 import difflib
 import time
+import logging
 from io import IOBase
@@ -56,7 +57,7 @@ def assert_eq_with_retry(instance, query, expectation, retry_count=20, sleep_tim
                 break
             time.sleep(sleep_time)
         except Exception as ex:
-            print(("assert_eq_with_retry retry {} exception {}".format(i + 1, ex)))
+            logging.exception(f"assert_eq_with_retry retry {i+1} exception {ex}")
             time.sleep(sleep_time)
     else:
         val = TSV(get_result(instance.query(query, user=user, stdin=stdin, timeout=timeout, settings=settings,
@@ -76,7 +77,7 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti
                 break
             time.sleep(sleep_time)
         except Exception as ex:
-            print("contains_in_log_with_retry retry {} exception {}".format(i + 1, ex))
+            logging.exception(f"contains_in_log_with_retry retry {i+1} exception {ex}")
             time.sleep(sleep_time)
     else:
         raise AssertionError("'{}' not found in logs".format(substring))
@@ -89,7 +90,7 @@ def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, setti
             break
         except Exception as ex:
             exception = ex
-            print("Failed to execute query '", query, "' on instance", instance.name, "will retry")
+            logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry")
             time.sleep(sleep_time)
     else:
         raise exception

View File

@@ -1,6 +1,6 @@
 [pytest]
 python_files = test*.py
-norecursedirs = _instances
+norecursedirs = _instances*
 timeout = 1800
 junit_duration_report = call
 junit_suite_name = integration

View File

@@ -43,8 +43,8 @@ def test_backup_from_old_version(started_cluster):
     assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"

-    node1.exec_in_container(['bash', '-c',
-                             'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
+    node1.exec_in_container(['find', '/var/lib/clickhouse/shadow/1/data/default/source_table'])
+    node1.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

     assert node1.query("SELECT COUNT() FROM dest_table") == "1\n"
@@ -81,8 +81,7 @@ def test_backup_from_old_version_setting(started_cluster):
     assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"

-    node2.exec_in_container(['bash', '-c',
-                             'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
+    node2.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

     assert node2.query("SELECT COUNT() FROM dest_table") == "1\n"
@@ -123,8 +122,7 @@ def test_backup_from_old_version_config(started_cluster):
     assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"

-    node3.exec_in_container(['bash', '-c',
-                             'cp -r /var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/ /var/lib/clickhouse/data/default/dest_table/detached'])
+    node3.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/default/source_table/all_1_1_0/', '/var/lib/clickhouse/data/default/dest_table/detached'])

     assert node3.query("SELECT COUNT() FROM dest_table") == "1\n"
@@ -156,8 +154,7 @@ def test_backup_and_alter(started_cluster):
     node4.query("ALTER TABLE test.backup_table DROP PARTITION tuple()")

-    node4.exec_in_container(['bash', '-c',
-                             'cp -r /var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/ /var/lib/clickhouse/data/test/backup_table/detached'])
+    node4.exec_in_container(['cp', '-r', '/var/lib/clickhouse/shadow/1/data/test/backup_table/all_1_1_0/', '/var/lib/clickhouse/data/test/backup_table/detached'])

     node4.query("ALTER TABLE test.backup_table ATTACH PARTITION tuple()")

View File

@@ -39,7 +39,7 @@ class Task:
         for instance_name, _ in cluster.instances.items():
             instance = cluster.instances[instance_name]
             instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, './task_taxi_data.xml'), self.container_task_file)
-            print("Copied task file to container of '{}' instance. Path {}".format(instance_name, self.container_task_file))
+            logging.debug(f"Copied task file to container of '{instance_name}' instance. Path {self.container_task_file}")

     def start(self):
@@ -48,11 +48,11 @@ class Task:
             node.query("DROP DATABASE IF EXISTS dailyhistory SYNC;")
             node.query("DROP DATABASE IF EXISTS monthlyhistory SYNC;")

-        instance = cluster.instances['first']
+        first = cluster.instances['first']

         # daily partition database
-        instance.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
-        instance.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
+        first.query("CREATE DATABASE IF NOT EXISTS dailyhistory on cluster events;")
+        first.query("""CREATE TABLE dailyhistory.yellow_tripdata_staging ON CLUSTER events
         (
             id UUID DEFAULT generateUUIDv4(),
             vendor_id String,
@@ -84,12 +84,12 @@ class Task:
             ORDER BY (tpep_pickup_datetime, id)
             PARTITION BY (toYYYYMMDD(tpep_pickup_datetime))""")

-        instance.query("""CREATE TABLE dailyhistory.yellow_tripdata
+        first.query("""CREATE TABLE dailyhistory.yellow_tripdata
             ON CLUSTER events
             AS dailyhistory.yellow_tripdata_staging
             ENGINE = Distributed('events', 'dailyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")

-        instance.query("""INSERT INTO dailyhistory.yellow_tripdata
+        first.query("""INSERT INTO dailyhistory.yellow_tripdata
             SELECT * FROM generateRandom(
                 'id UUID DEFAULT generateUUIDv4(),
                 vendor_id String,
@@ -119,8 +119,8 @@ class Task:
                 1, 10, 2) LIMIT 50;""")

         # monthly partition database
-        instance.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
-        instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
+        first.query("create database IF NOT EXISTS monthlyhistory on cluster events;")
+        first.query("""CREATE TABLE monthlyhistory.yellow_tripdata_staging ON CLUSTER events
         (
             id UUID DEFAULT generateUUIDv4(),
             vendor_id String,
@@ -153,16 +153,16 @@ class Task:
             ORDER BY (tpep_pickup_datetime, id)
             PARTITION BY (pickup_location_id, toYYYYMM(tpep_pickup_datetime))""")

-        instance.query("""CREATE TABLE monthlyhistory.yellow_tripdata
+        first.query("""CREATE TABLE monthlyhistory.yellow_tripdata
             ON CLUSTER events
             AS monthlyhistory.yellow_tripdata_staging
             ENGINE = Distributed('events', 'monthlyhistory', yellow_tripdata_staging, sipHash64(id) % 3);""")

     def check(self):
-        instance = cluster.instances["first"]
-        a = TSV(instance.query("SELECT count() from dailyhistory.yellow_tripdata"))
-        b = TSV(instance.query("SELECT count() from monthlyhistory.yellow_tripdata"))
+        first = cluster.instances["first"]
+        a = TSV(first.query("SELECT count() from dailyhistory.yellow_tripdata"))
+        b = TSV(first.query("SELECT count() from monthlyhistory.yellow_tripdata"))
         assert a == b, "Distributed tables"

         for instance_name, instance in cluster.instances.items():
@@ -187,10 +187,10 @@ def execute_task(started_cluster, task, cmd_options):
     task.start()

     zk = started_cluster.get_kazoo_client('zoo1')
-    print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))
+    logging.debug("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))

     # Run cluster-copier processes on each node
-    docker_api = docker.from_env().api
+    docker_api = started_cluster.docker_client.api
     copiers_exec_ids = []

     cmd = ['/usr/bin/clickhouse', 'copier',
@@ -201,9 +201,9 @@ def execute_task(started_cluster, task, cmd_options):
            '--base-dir', '/var/log/clickhouse-server/copier']
     cmd += cmd_options

-    print(cmd)
+    logging.debug(f"execute_task cmd: {cmd}")

-    for instance_name, instance in started_cluster.instances.items():
+    for instance_name in started_cluster.instances.keys():
+        instance = started_cluster.instances[instance_name]
         container = instance.get_docker_handle()
         instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_three_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml")

View File

@@ -430,7 +430,7 @@ def execute_task(started_cluster, task, cmd_options):
     print("Use ZooKeeper server: {}:{}".format(zk.hosts[0][0], zk.hosts[0][1]))

     # Run cluster-copier processes on each node
-    docker_api = docker.from_env().api
+    docker_api = started_cluster.docker_client.api
     copiers_exec_ids = []

     cmd = ['/usr/bin/clickhouse', 'copier',
@@ -443,7 +443,7 @@ def execute_task(started_cluster, task, cmd_options):
     print(cmd)

-    for instance_name, instance in started_cluster.instances.items():
+    for instance_name in started_cluster.instances.keys():
+        instance = started_cluster.instances[instance_name]
         container = instance.get_docker_handle()
         instance.copy_file_to_container(os.path.join(CURRENT_TEST_DIR, "configs_two_nodes/config-copier.xml"), "/etc/clickhouse-server/config-copier.xml")

View File

@@ -150,7 +150,7 @@ def test_reload_after_loading(started_cluster):
     time.sleep(1)  # see the comment above
     replace_in_file_in_container('/etc/clickhouse-server/dictionaries/executable.xml', '82', '83')
     replace_in_file_in_container('/etc/clickhouse-server/dictionaries/file.txt', '102', '103')
-    time.sleep(7)
+    time.sleep(10)
     assert query("SELECT dictGetInt32('file', 'a', toUInt64(9))") == "103\n"
     assert query("SELECT dictGetInt32('executable', 'a', toUInt64(7))") == "83\n"

View File

@@ -1,6 +1,6 @@
 <yandex>
     <zookeeper>
         <!-- Required for correct timing in current test case -->
-        <session_timeout_ms replace="1">10000</session_timeout_ms>
+        <session_timeout_ms replace="1">15000</session_timeout_ms>
     </zookeeper>
 </yandex>

View File

@@ -1,6 +1,6 @@
 <yandex>
     <zookeeper>
         <!-- Required for correct timing in current test case -->
-        <session_timeout_ms replace="1">10000</session_timeout_ms>
+        <session_timeout_ms replace="1">15000</session_timeout_ms>
     </zookeeper>
 </yandex>

View File

@@ -53,6 +53,7 @@ def test_default_database(test_cluster):
 def test_create_view(test_cluster):
     instance = test_cluster.instances['ch3']
+    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.super_simple_view ON CLUSTER 'cluster'")
     test_cluster.ddl_check_query(instance,
                                  "CREATE VIEW test.super_simple_view ON CLUSTER 'cluster' AS SELECT * FROM system.numbers FORMAT TSV")
     test_cluster.ddl_check_query(instance,
@@ -76,7 +77,7 @@ def test_on_server_fail(test_cluster):
     kill_instance.get_docker_handle().stop()
     request = instance.get_query_request("CREATE TABLE test.test_server_fail ON CLUSTER 'cluster' (i Int8) ENGINE=Null",
-                                         timeout=30)
+                                         timeout=180)
     kill_instance.get_docker_handle().start()

     test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'")
@@ -92,27 +93,6 @@ def test_on_server_fail(test_cluster):
     test_cluster.ddl_check_query(instance, "DROP TABLE test.test_server_fail ON CLUSTER 'cluster'")

-def _test_on_connection_losses(test_cluster, zk_timeout):
-    instance = test_cluster.instances['ch1']
-    kill_instance = test_cluster.instances['ch2']
-
-    with PartitionManager() as pm:
-        pm.drop_instance_zk_connections(kill_instance)
-        request = instance.get_query_request("DROP TABLE IF EXISTS test.__nope__ ON CLUSTER 'cluster'", timeout=20)
-        time.sleep(zk_timeout)
-        pm.restore_instance_zk_connections(kill_instance)
-
-    test_cluster.check_all_hosts_successfully_executed(request.get_answer())
-
-def test_on_connection_loss(test_cluster):
-    _test_on_connection_losses(test_cluster, 5)  # connection loss will occur only (3 sec ZK timeout in config)
-
-def test_on_session_expired(test_cluster):
-    _test_on_connection_losses(test_cluster, 15)  # session should be expired (3 sec ZK timeout in config)
-
 def test_simple_alters(test_cluster):
     instance = test_cluster.instances['ch2']
@@ -190,7 +170,7 @@ def test_implicit_macros(test_cluster):
     instance = test_cluster.instances['ch2']

-    test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}'")
+    test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_db ON CLUSTER '{cluster}' SYNC")
     test_cluster.ddl_check_query(instance, "CREATE DATABASE IF NOT EXISTS test_db ON CLUSTER '{cluster}'")

     test_cluster.ddl_check_query(instance, """
@@ -270,6 +250,15 @@ def test_create_reserved(test_cluster):
 def test_rename(test_cluster):
     instance = test_cluster.instances['ch1']
     rules = test_cluster.pm_random_drops.pop_rules()
+    test_cluster.ddl_check_query(instance,
+                                 "DROP TABLE IF EXISTS rename_shard ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance,
+                                 "DROP TABLE IF EXISTS rename_new ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance,
+                                 "DROP TABLE IF EXISTS rename_old ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance,
+                                 "DROP TABLE IF EXISTS rename ON CLUSTER cluster SYNC")
     test_cluster.ddl_check_query(instance,
                                  "CREATE TABLE rename_shard ON CLUSTER cluster (id Int64, sid String DEFAULT concat('old', toString(id))) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/staging/test_shard', '{replica}') ORDER BY (id)")
     test_cluster.ddl_check_query(instance,
@@ -326,12 +315,15 @@ def test_socket_timeout(test_cluster):
 def test_replicated_without_arguments(test_cluster):
     rules = test_cluster.pm_random_drops.pop_rules()
     instance = test_cluster.instances['ch1']
+    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS test_atomic.rmt ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance, "DROP DATABASE IF EXISTS test_atomic ON CLUSTER cluster SYNC")
+
     test_cluster.ddl_check_query(instance, "CREATE DATABASE test_atomic ON CLUSTER cluster ENGINE=Atomic")
     assert "are supported only for ON CLUSTER queries with Atomic database engine" in \
         instance.query_and_get_error("CREATE TABLE test_atomic.rmt (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
     test_cluster.ddl_check_query(instance,
                                  "CREATE TABLE test_atomic.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree() ORDER BY n")
-    test_cluster.ddl_check_query(instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP TABLE test_atomic.rmt ON CLUSTER cluster SYNC")
     test_cluster.ddl_check_query(instance,
                                  "CREATE TABLE test_atomic.rmt UUID '12345678-0000-4000-8000-000000000001' ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree ORDER BY n")
     assert instance.query("SHOW CREATE test_atomic.rmt FORMAT TSVRaw") == \
@@ -349,7 +341,7 @@ def test_replicated_without_arguments(test_cluster):
                                  "CREATE TABLE test_atomic.rsmt ON CLUSTER cluster (n UInt64, m UInt64, k UInt64) ENGINE=ReplicatedSummingMergeTree((m, k)) ORDER BY n")
     test_cluster.ddl_check_query(instance,
                                  "CREATE TABLE test_atomic.rvcmt ON CLUSTER cluster (n UInt64, m Int8, k UInt64) ENGINE=ReplicatedVersionedCollapsingMergeTree(m, k) ORDER BY n")
-    test_cluster.ddl_check_query(instance, "DROP DATABASE test_atomic ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP DATABASE test_atomic ON CLUSTER cluster SYNC")

     test_cluster.ddl_check_query(instance, "CREATE DATABASE test_ordinary ON CLUSTER cluster ENGINE=Ordinary")
     assert "are supported only for ON CLUSTER queries with Atomic database engine" in \
@@ -359,7 +351,7 @@ def test_replicated_without_arguments(test_cluster):
     test_cluster.ddl_check_query(instance, "CREATE TABLE test_ordinary.rmt ON CLUSTER cluster (n UInt64, s String) ENGINE=ReplicatedMergeTree('/{shard}/{table}/', '{replica}') ORDER BY n")
     assert instance.query("SHOW CREATE test_ordinary.rmt FORMAT TSVRaw") == \
         "CREATE TABLE test_ordinary.rmt\n(\n    `n` UInt64,\n    `s` String\n)\nENGINE = ReplicatedMergeTree('/{shard}/rmt/', '{replica}')\nORDER BY n\nSETTINGS index_granularity = 8192\n"
-    test_cluster.ddl_check_query(instance, "DROP DATABASE test_ordinary ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP DATABASE test_ordinary ON CLUSTER cluster SYNC")
     test_cluster.pm_random_drops.push_rules(rules)

View File

@@ -38,9 +38,9 @@ def test_cluster(request):
 def test_replicated_alters(test_cluster):
     instance = test_cluster.instances['ch2']
-    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster")
-    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster")
-    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS merge_for_alter ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_32 ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance, "DROP TABLE IF EXISTS all_merge_64 ON CLUSTER cluster SYNC")

     # Temporarily disable random ZK packet drops, they might break creation of ReplicatedMergeTree replicas
     firewall_drops_rules = test_cluster.pm_random_drops.pop_rules()
@@ -90,10 +90,10 @@ ENGINE = Distributed(cluster, default, merge_for_alter, i)
     assert TSV(instance.query("SELECT i, s FROM all_merge_64 ORDER BY i")) == TSV(
         ''.join(['{}\t{}\n'.format(x, x) for x in range(4)]))

-    test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP TABLE merge_for_alter ON CLUSTER cluster SYNC")

     # Enable random ZK packet drops
     test_cluster.pm_random_drops.push_rules(firewall_drops_rules)

-    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER cluster")
-    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER cluster")
+    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_32 ON CLUSTER cluster SYNC")
+    test_cluster.ddl_check_query(instance, "DROP TABLE all_merge_64 ON CLUSTER cluster SYNC")

View File

@@ -6,7 +6,7 @@ import threading
 import os

 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, get_instances_dir

 # By default the exceptions that were thrown in threads will be ignored
@@ -30,7 +30,7 @@ class SafeThread(threading.Thread):
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/storage_conf.xml')
+CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/storage_conf.xml'.format(get_instances_dir()))

 def replace_config(old, new):
def replace_config(old, new):

View File

@@ -5,10 +5,10 @@ import string
 import time

 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, get_instances_dir

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-NOT_RESTORABLE_CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml')
+NOT_RESTORABLE_CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node_not_restorable/configs/config.d/storage_conf_not_restorable.xml'.format(get_instances_dir()))
 COMMON_CONFIGS = ["configs/config.d/bg_processing_pool_conf.xml", "configs/config.d/log_conf.xml", "configs/config.d/clusters.xml"]

View File

@@ -2,13 +2,14 @@ import os
 import time

 import pytest
-from helpers.cluster import ClickHouseCluster
+from helpers.cluster import ClickHouseCluster, get_instances_dir

 cluster = ClickHouseCluster(__file__)
 node = cluster.add_instance('node', main_configs=["configs/max_table_size_to_drop.xml"])

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/node/configs/config.d/max_table_size_to_drop.xml')
+CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/max_table_size_to_drop.xml'.format(get_instances_dir()))

 @pytest.fixture(scope="module")

View File

@@ -21,16 +21,27 @@ create_table_sql_template = """
     PRIMARY KEY (`id`)) ENGINE=InnoDB;
     """

-def create_mysql_db(conn, name):
-    with conn.cursor() as cursor:
-        cursor.execute(
-            "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
+drop_table_sql_template = """
+    DROP TABLE IF EXISTS `clickhouse`.`{}`;
+    """
+
+def get_mysql_conn(started_cluster, host):
+    conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port)
+    return conn
+
+def create_mysql_table(conn, tableName):
+    with conn.cursor() as cursor:
+        cursor.execute(create_table_sql_template.format(tableName))
+
+def drop_mysql_table(conn, tableName):
+    with conn.cursor() as cursor:
+        cursor.execute(drop_table_sql_template.format(tableName))
+
+def create_mysql_db(conn, name):
+    with conn.cursor() as cursor:
+        cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
+        cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))

 @pytest.fixture(scope="module")
 def started_cluster():
@@ -51,7 +62,10 @@ def started_cluster():
 def test_many_connections(started_cluster):
     table_name = 'test_many_connections'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
+
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)

     node1.query('''
@@ -66,14 +80,18 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL
     query += "SELECT id FROM {t})"

     assert node1.query(query.format(t=table_name)) == '250\n'
+
+    drop_mysql_table(conn, table_name)
     conn.close()

 def test_insert_select(started_cluster):
     table_name = 'test_insert_select'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)

     node1.query('''
 CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
 '''.format(table_name, table_name))
@@ -87,7 +105,9 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

 def test_replace_select(started_cluster):
     table_name = 'test_replace_select'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)

     node1.query('''
@@ -106,7 +126,9 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

 def test_insert_on_duplicate_select(started_cluster):
     table_name = 'test_insert_on_duplicate_select'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)

     node1.query('''
@@ -125,7 +147,10 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

 def test_where(started_cluster):
     table_name = 'test_where'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
+
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)
     node1.query('''
 CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
@@ -146,6 +171,7 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL

 def test_table_function(started_cluster):
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, 'table_function')
     create_mysql_table(conn, 'table_function')
     table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('table_function')
     assert node1.query("SELECT count() FROM {}".format(table_function)).rstrip() == '0'
@@ -168,6 +194,8 @@ def test_table_function(started_cluster):

 def test_binary_type(started_cluster):
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, 'binary_type')
+
     with conn.cursor() as cursor:
         cursor.execute("CREATE TABLE clickhouse.binary_type (id INT PRIMARY KEY, data BINARY(16) NOT NULL)")
     table_function = "mysql('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse')".format('binary_type')
@@ -177,7 +205,10 @@ def test_binary_type(started_cluster):

 def test_enum_type(started_cluster):
     table_name = 'test_enum_type'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
+
     conn = get_mysql_conn(started_cluster, cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)
     node1.query('''
 CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse', 1);
@@ -186,20 +217,8 @@ CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32, source Enum8('IP' = 1, 'URL' = 2)) ENGINE = MySQL
     assert node1.query("SELECT source FROM {} LIMIT 1".format(table_name)).rstrip() == 'URL'
     conn.close()

-def get_mysql_conn(started_cluster, host):
-    conn = pymysql.connect(user='root', password='clickhouse', host=host, port=started_cluster.mysql_port)
-    return conn
-
-def create_mysql_db(conn, name):
-    with conn.cursor() as cursor:
-        cursor.execute("DROP DATABASE IF EXISTS {}".format(name))
-        cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(name))
-
-def create_mysql_table(conn, tableName):
-    with conn.cursor() as cursor:
-        cursor.execute(create_table_sql_template.format(tableName))
-
 def test_mysql_distributed(started_cluster):
     table_name = 'test_replicas'
@@ -218,6 +237,8 @@ def test_mysql_distributed(started_cluster):
     create_mysql_table(conn3, table_name)
     create_mysql_table(conn4, table_name)

+    node2.query('DROP TABLE IF EXISTS test_replicas')
+
     # Storage with 3 replicas
     node2.query('''
         CREATE TABLE test_replicas
@@ -227,6 +248,7 @@ def test_mysql_distributed(started_cluster):
     # Fill remote tables with different data to be able to check
     nodes = [node1, node2, node2, node2]
     for i in range(1, 5):
+        nodes[i-1].query('DROP TABLE IF EXISTS test_replica{}'.format(i))
         nodes[i-1].query('''
             CREATE TABLE test_replica{}
             (id UInt32, name String, age UInt32, money UInt32)
@@ -249,6 +271,8 @@ def test_mysql_distributed(started_cluster):
     assert(result == 'host2\nhost3\nhost4\n')

     # Storage with two shards, each has 2 replicas
+    node2.query('DROP TABLE IF EXISTS test_shards')
+
     node2.query('''
         CREATE TABLE test_shards
         (id UInt32, name String, age UInt32, money UInt32)
@@ -275,9 +299,12 @@ def test_mysql_distributed(started_cluster):

 def test_external_settings(started_cluster):
     table_name = 'test_external_settings'
+    node1.query(f'DROP TABLE IF EXISTS {table_name}')
     conn = get_mysql_conn(started_cluster, started_cluster.mysql_ip)
+    drop_mysql_table(conn, table_name)
     create_mysql_table(conn, table_name)

+    node3.query(f'DROP TABLE IF EXISTS {table_name}')
     node3.query('''
 CREATE TABLE {}(id UInt32, name String, age UInt32, money UInt32) ENGINE = MySQL('mysql57:3306', 'clickhouse', '{}', 'root', 'clickhouse');
 '''.format(table_name, table_name))

View File

@@ -9,12 +9,13 @@ import time

 import helpers.client
 import pytest
-from helpers.cluster import ClickHouseCluster, ClickHouseInstance
+from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir

 MINIO_INTERNAL_PORT = 9001

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/dummy/configs/config.d/defaultS3.xml')
+CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))

 # Creates S3 bucket for tests and allows anonymous read-write access to it.

View File

@@ -20,10 +20,6 @@ system_logs = [
     ('system.metric_log', 1),
 ]

-# Default timeout for flush is 60
-# decrease timeout for the test to show possible issues.
-timeout = pytest.mark.timeout(30)
-
 @pytest.fixture(scope='module', autouse=True)
 def start_cluster():
@@ -39,7 +35,6 @@ def flush_logs():
     node.query('SYSTEM FLUSH LOGS')

-@timeout
 @pytest.mark.parametrize('table,exists', system_logs)
 def test_system_logs(flush_logs, table, exists):
     q = 'SELECT * FROM {}'.format(table)
@@ -51,7 +46,6 @@ def test_system_logs(flush_logs, table, exists):

 # Logic is tricky, let's check that there is no hang in case the message queue
 # is not empty (this is another code path in the code).
-@timeout
 def test_system_logs_non_empty_queue():
     node.query('SELECT 1', settings={
         # right now defaults are the same,

View File

@@ -30,6 +30,7 @@ def started_cluster():
 def test_chroot_with_same_root(started_cluster):
     for i, node in enumerate([node1, node2]):
+        node.query('DROP TABLE IF EXISTS simple SYNC')
         node.query('''
         CREATE TABLE simple (date Date, id UInt32)
         ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
@@ -44,6 +45,7 @@ def test_chroot_with_same_root(started_cluster):
 def test_chroot_with_different_root(started_cluster):
     for i, node in [(1, node1), (3, node3)]:
+        node.query('DROP TABLE IF EXISTS simple_different SYNC')
         node.query('''
         CREATE TABLE simple_different (date Date, id UInt32)
         ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple_different', '{replica}', date, id, 8192);

View File

@@ -22,6 +22,8 @@ def started_cluster():
         cluster.shutdown()

 def test_identity(started_cluster):
+    node1.query('DROP TABLE IF EXISTS simple SYNC')
+
     node1.query('''
     CREATE TABLE simple (date Date, id UInt32)
     ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);

View File

@@ -0,0 +1,9 @@
+-- { echo }
+SELECT 1 != (NOT 1);
+1
+SELECT 1 != NOT 1;
+1
+EXPLAIN SYNTAX SELECT 1 != (NOT 1);
+SELECT 1 != NOT 1
+EXPLAIN SYNTAX SELECT 1 != NOT 1;
+SELECT 1 != NOT 1

View File

@@ -0,0 +1,5 @@
+-- { echo }
+SELECT 1 != (NOT 1);
+SELECT 1 != NOT 1;
+EXPLAIN SYNTAX SELECT 1 != (NOT 1);
+EXPLAIN SYNTAX SELECT 1 != NOT 1;

View File

@@ -0,0 +1 @@
+a	c

View File

@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS t_array_index;
+
+CREATE TABLE t_array_index (n Nested(key String, value String))
+ENGINE = MergeTree ORDER BY n.key;
+
+INSERT INTO t_array_index VALUES (['a', 'b'], ['c', 'd']);
+
+SELECT * FROM t_array_index ARRAY JOIN n WHERE n.key = 'a';
+
+DROP TABLE IF EXISTS t_array_index;

View File

@@ -22,15 +22,15 @@ def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
     tasks = []
     with Pool(8) as pool:
         try:
-            #run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("example.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("ldap.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("rbac.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("aes_encryption.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("map_type.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("window_functions.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("datetime64_extended_range.regression", "regression")), args)
             #run_scenario(pool, tasks, Feature(test=load("kerberos.regression", "regression")), args)
-            #run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args)
+            run_scenario(pool, tasks, Feature(test=load("extended_precision_data_types.regression", "regression")), args)
         finally:
             join(tasks)