Merge branch 'master' into add_one_more_group

mergify[bot] 2021-12-02 19:28:13 +00:00 committed by GitHub
commit e2cb91b560
22 changed files with 376 additions and 47 deletions


@@ -19,6 +19,11 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
Poco::Util::AbstractConfiguration::Keys keys;
config.keys(config_prefix + ".raft_configuration", keys);
/// Sometimes (especially in cloud environments) users can provide an incorrect
/// configuration with duplicated raft ids or endpoints. We check for them
/// at the config parsing stage and never commit an invalid configuration to the quorum.
std::unordered_map<std::string, int> check_duplicated_hostnames;
size_t total_servers = 0;
for (const auto & server_key : keys)
{
@@ -37,6 +42,24 @@ KeeperStateManager::KeeperConfigurationWrapper KeeperStateManager::parseServersC
result.servers_start_as_followers.insert(new_server_id);
auto endpoint = hostname + ":" + std::to_string(port);
if (check_duplicated_hostnames.count(endpoint))
{
throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contains duplicate endpoints: "
"endpoint {} has already been added with id {}, but it is going to be added again with id {}",
endpoint, check_duplicated_hostnames[endpoint], new_server_id);
}
else
{
/// Full scan to check for duplicated ids
for (const auto & [id_endpoint, id] : check_duplicated_hostnames)
{
if (new_server_id == id)
throw Exception(ErrorCodes::RAFT_ERROR, "Raft config contains duplicate ids: id {} has already been added with endpoint {}, "
"but it is going to be added again with endpoint {}", id, id_endpoint, endpoint);
}
check_duplicated_hostnames.emplace(endpoint, new_server_id);
}
auto peer_config = nuraft::cs_new<nuraft::srv_config>(new_server_id, 0, endpoint, "", !can_become_leader, priority);
if (my_server_id == new_server_id)
{
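In plain terms, the two hunks above reject any raft_configuration in which either an endpoint or a server id appears twice, before anything can reach the quorum. A minimal stand-alone sketch of the same checks, in Python rather than the C++ above (validate_raft_config is a hypothetical helper, not part of this commit; servers are assumed to arrive as (id, hostname, port) tuples):

# Hypothetical sketch of the validation added above (not ClickHouse code).
def validate_raft_config(servers):
    seen = {}  # endpoint -> server id that first claimed it
    for server_id, hostname, port in servers:
        endpoint = f"{hostname}:{port}"
        if endpoint in seen:
            raise ValueError(f"duplicate endpoint {endpoint}: already added with id {seen[endpoint]}, "
                             f"now added again with id {server_id}")
        # Full scan over already seen ids, mirroring the O(n^2) loop above.
        for known_endpoint, known_id in seen.items():
            if known_id == server_id:
                raise ValueError(f"duplicate id {server_id}: already added with endpoint {known_endpoint}, "
                                 f"now added again with endpoint {endpoint}")
        seen[endpoint] = server_id

validate_raft_config([(1, "node1", 9234), (2, "node2", 9234)])   # ok
# validate_raft_config([(1, "node1", 9234), (1, "node2", 9234)]) # raises: duplicate id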


@@ -15,6 +15,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
IMAGE_NAME = 'clickhouse/fuzzer'
@@ -47,6 +48,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(temp_path, IMAGE_NAME)
build_name = get_build_name_for_check(check_name)


@@ -85,6 +85,30 @@ def build_clickhouse(packager_cmd, logs_path):
logging.info("Build failed")
return build_log_path, retcode == 0
def get_build_results_if_exists(s3_helper, s3_prefix):
try:
content = s3_helper.list_prefix(s3_prefix)
return content
except Exception as ex:
logging.info("Got exception %s listing %s", ex, s3_prefix)
return None
def create_json_artifact(temp_path, build_name, log_url, build_urls, build_config, elapsed, success):
subprocess.check_call(f"echo 'BUILD_NAME=build_urls_{build_name}' >> $GITHUB_ENV", shell=True)
result = {
"log_url": log_url,
"build_urls": build_urls,
"build_config": build_config,
"elapsed_seconds": elapsed,
"status": success,
}
with open(os.path.join(temp_path, "build_urls_" + build_name + '.json'), 'w') as build_links:
json.dump(result, build_links)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
repo_path = os.getenv("REPO_COPY", os.path.abspath("../../"))
@@ -104,12 +128,41 @@ if __name__ == "__main__":
logging.info("Repo copy path %s", repo_path)
gh = Github(get_best_robot_token())
s3_helper = S3Helper('https://s3.amazonaws.com')
version = get_version_from_repo(repo_path)
release_or_pr = None
if 'release' in pr_info.labels or 'release-lts' in pr_info.labels:
# for release pull requests we use branch name prefixes, not pr numbers
release_or_pr = pr_info.head_ref
elif pr_info.number == 0:
# for pushes to master - major version
release_or_pr = ".".join(version.as_tuple()[:2])
else:
# PR number for anything else
release_or_pr = str(pr_info.number)
s3_path_prefix = "/".join((release_or_pr, pr_info.sha, build_name))
# If this is a rerun, then we try to find already created artifacts and just
# put them as the github actions artifact (result)
build_results = get_build_results_if_exists(s3_helper, s3_path_prefix)
if build_results is not None and len(build_results) > 0:
logging.info("Some build results found %s", build_results)
build_urls = []
log_url = ''
for url in build_results:
if 'build_log.log' in url:
log_url = 'https://s3.amazonaws.com/clickhouse-builds/' + url.replace('+', '%2B').replace(' ', '%20')
else:
build_urls.append('https://s3.amazonaws.com/clickhouse-builds/' + url.replace('+', '%2B').replace(' ', '%20'))
create_json_artifact(temp_path, build_name, log_url, build_urls, build_config, 0, True)
sys.exit(0)
image_name = get_image_name(build_config)
docker_image = get_image_with_version(os.getenv("IMAGES_PATH"), image_name)
image_version = docker_image.version
version = get_version_from_repo(repo_path)
logging.info("Got version from repo %s", version.get_version_string())
version_type = 'testing'
@@ -121,14 +174,12 @@ if __name__ == "__main__":
logging.info("Updated local files with version")
logging.info("Build short name %s", build_name)
subprocess.check_call(f"echo 'BUILD_NAME=build_urls_{build_name}' >> $GITHUB_ENV", shell=True)
build_output_path = os.path.join(temp_path, build_name)
if not os.path.exists(build_output_path):
os.makedirs(build_output_path)
ccache_path = os.path.join(caches_path, build_name + '_ccache')
s3_helper = S3Helper('https://s3.amazonaws.com')
logging.info("Will try to fetch cache for our build")
get_ccache_if_not_exists(ccache_path, s3_helper, pr_info.number, temp_path)
@@ -155,19 +206,6 @@ if __name__ == "__main__":
logging.info("Will upload cache")
upload_ccache(ccache_path, s3_helper, pr_info.number, temp_path)
release_or_pr = None
if 'release' in pr_info.labels or 'release-lts' in pr_info.labels:
# for release pull requests we use branch names prefixes, not pr numbers
release_or_pr = pr_info.head_ref
elif pr_info.number == 0:
# for pushes to master - major version
release_or_pr = ".".join(version.as_tuple()[:2])
else:
# PR number for anything else
release_or_pr = str(pr_info.number)
s3_path_prefix = "/".join((release_or_pr, pr_info.sha, build_name))
if os.path.exists(log_path):
log_url = s3_helper.upload_build_file_to_s3(log_path, s3_path_prefix + "/" + os.path.basename(log_path))
logging.info("Log url %s", log_url)
@@ -179,19 +217,9 @@ if __name__ == "__main__":
print("::notice ::Build URLs: {}".format('\n'.join(build_urls)))
result = {
"log_url": log_url,
"build_urls": build_urls,
"build_config": build_config,
"elapsed_seconds": elapsed,
"status": success,
}
print("::notice ::Log URL: {}".format(log_url))
with open(os.path.join(temp_path, "build_urls_" + build_name + '.json'), 'w') as build_links:
json.dump(result, build_links)
create_json_artifact(temp_path, build_name, log_url, build_urls, build_config, elapsed, success)
# Fail the build job if the build did not succeed
if not success:
sys.exit(1)
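The rerun logic added to this build script boils down to: derive an S3 prefix from (release_or_pr, sha, build_name), list it, and if a previous run already uploaded artifacts there, rebuild the JSON artifact from the existing URLs and exit instead of compiling again. A simplified sketch of that flow (reuse_existing_build and list_prefix are illustrative stand-ins, not the real helpers; the quote() call mirrors the '+' and space replacements above):

# Hypothetical condensed view of the rerun path added above.
import urllib.parse

S3_BASE = "https://s3.amazonaws.com/clickhouse-builds/"

def reuse_existing_build(list_prefix, s3_path_prefix):
    # list_prefix stands in for s3_helper.list_prefix from the diff above.
    try:
        keys = list_prefix(s3_path_prefix)
    except Exception:
        return None  # a listing failure is treated the same as "no previous build"
    if not keys:
        return None
    log_url, build_urls = "", []
    for key in keys:
        # quote() covers the '+' -> %2B and ' ' -> %20 replacements done above.
        url = S3_BASE + urllib.parse.quote(key)
        if "build_log.log" in key:
            log_url = url
        else:
            build_urls.append(url)
    return log_url, build_urls

# Example: a previous run left a log and one (made-up) package under the prefix.
print(reuse_existing_build(
    lambda prefix: [prefix + "/build_log.log", prefix + "/clickhouse-common-static.deb"],
    "master/deadbeef/package_release"))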


@@ -11,6 +11,7 @@ from get_robot_token import get_best_robot_token
from pr_info import PRInfo, get_event
from commit_status_helper import get_commit
from ci_config import CI_CONFIG
from rerun_helper import RerunHelper
class BuildResult():
def __init__(self, compiler, build_type, sanitizer, bundled, splitted, status, elapsed_seconds, with_coverage):
@@ -83,6 +84,13 @@ if __name__ == "__main__":
build_check_name = sys.argv[1]
gh = Github(get_best_robot_token())
pr_info = PRInfo(get_event())
rerun_helper = RerunHelper(gh, pr_info, build_check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
reports_order = CI_CONFIG["builds_report_config"][build_check_name]
logging.info("My reports list %s", reports_order)
@@ -114,7 +122,6 @@ if __name__ == "__main__":
logging.info("Totally got %s results", len(build_results))
gh = Github(get_best_robot_token())
s3_helper = S3Helper('https://s3.amazonaws.com')
pr_info = PRInfo(get_event())
@@ -161,7 +168,7 @@ if __name__ == "__main__":
ok_builds += 1
if ok_builds == 0:
summary_status = "failure"
summary_status = "error"
description = "{}/{} builds are OK".format(ok_builds, total_builds)


@@ -4,6 +4,7 @@ from distutils.version import StrictVersion
import logging
import os
import subprocess
import sys
from github import Github
@@ -16,6 +17,7 @@ from docker_pull_helper import get_images_with_versions
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
IMAGE_UBUNTU = "clickhouse/test-old-ubuntu"
IMAGE_CENTOS = "clickhouse/test-old-centos"
@@ -109,6 +111,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, CHECK_NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_images = get_images_with_versions(reports_path, [IMAGE_CENTOS, IMAGE_UBUNTU])
packages_path = os.path.join(temp_path, "packages")


@@ -12,6 +12,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status, get_commit
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
NAME = "Docs Check (actions)"
@@ -27,6 +28,12 @@ if __name__ == "__main__":
pr_info = PRInfo(get_event(), need_changed_files=True)
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
if not pr_info.has_changes_in_documentation():
logging.info("No changes in documentation")
commit = get_commit(gh, pr_info.sha)


@@ -15,6 +15,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
NAME = 'Fast test (actions)'
@@ -67,6 +68,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(temp_path, 'clickhouse/fasttest')
s3_helper = S3Helper('https://s3.amazonaws.com')


@@ -17,6 +17,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status, get_commit
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
def get_additional_envs(check_name):
if 'DatabaseReplicated' in check_name:
@@ -116,12 +117,18 @@ if __name__ == "__main__":
check_name = sys.argv[1]
kill_timeout = int(sys.argv[2])
flaky_check = 'flaky' in check_name.lower()
gh = Github(get_best_robot_token())
pr_info = PRInfo(get_event(), need_changed_files=flaky_check)
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
gh = Github(get_best_robot_token())
pr_info = PRInfo(get_event(), need_changed_files=flaky_check)
tests_to_run = []
if flaky_check:
tests_to_run = get_tests_to_run(pr_info)
@@ -130,7 +137,6 @@ if __name__ == "__main__":
commit.create_status(context=check_name, description='Not found changed stateless tests', state='success')
sys.exit(0)
image_name = get_image_name(check_name)
docker_image = get_image_with_version(reports_path, image_name)


@@ -18,6 +18,7 @@ from docker_pull_helper import get_images_with_versions
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
DOWNLOAD_RETRIES_COUNT = 5
@@ -113,6 +114,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
images = get_images_with_versions(temp_path, IMAGES)
images_with_versions = {i.name: i.version for i in images}
result_path = os.path.join(temp_path, "output_dir")


@@ -7,6 +7,30 @@ import sys
import json
import time
from collections import namedtuple
import boto3
def get_dead_runners_in_ec2(runners):
ids = {runner.name: runner for runner in runners if runner.offline and not runner.busy}
if not ids:
return []
client = boto3.client('ec2')
print("Checking ids", list(ids.keys()))
instances_statuses = client.describe_instance_status(InstanceIds=list(ids.keys()))
found_instances = set()
print("Response", instances_statuses)
for instance_status in instances_statuses['InstanceStatuses']:
if instance_status['InstanceState']['Name'] in ('pending', 'running'):
found_instances.add(instance_status['InstanceId'])
print("Found instances", found_instances)
result_to_delete = []
for instance_id, runner in ids.items():
if instance_id not in found_instances:
print("Instance", instance_id, "is not alive, going to remove it")
result_to_delete.append(runner)
return result_to_delete
def get_key_and_app_from_aws():
import boto3
@@ -23,7 +47,7 @@ def get_key_and_app_from_aws():
def handler(event, context):
private_key, app_id = get_key_and_app_from_aws()
main(private_key, app_id, True, False)
main(private_key, app_id, True, True)
def get_installation_id(jwt_token):
headers = {
@@ -74,6 +98,7 @@ def list_runners(access_token):
desc = RunnerDescription(id=runner['id'], name=runner['name'], tags=tags,
offline=runner['status']=='offline', busy=runner['busy'])
result.append(desc)
return result
def group_runners_by_tag(listed_runners):
@@ -95,10 +120,9 @@ def group_runners_by_tag(listed_runners):
def push_metrics_to_cloudwatch(listed_runners, namespace):
import boto3
client = boto3.client('cloudwatch')
metrics_data = []
busy_runners = sum(1 for runner in listed_runners if runner.busy)
busy_runners = sum(1 for runner in listed_runners if runner.busy and not runner.offline)
metrics_data.append({
'MetricName': 'BusyRunners',
'Value': busy_runners,
@@ -154,6 +178,7 @@ def main(github_secret_key, github_app_id, push_to_cloudwatch, delete_offline_ru
grouped_runners = group_runners_by_tag(runners)
for group, group_runners in grouped_runners.items():
if push_to_cloudwatch:
print(group)
push_metrics_to_cloudwatch(group_runners, 'RunnersMetrics/' + group)
else:
print(group, f"({len(group_runners)})")
@@ -162,12 +187,10 @@ def main(github_secret_key, github_app_id, push_to_cloudwatch, delete_offline_ru
if delete_offline_runners:
print("Going to delete offline runners")
for runner in runners:
if runner.offline and not runner.busy:
print("Deleting runner", runner)
delete_runner(access_token, runner)
dead_runners = get_dead_runners_in_ec2(runners)
for runner in dead_runners:
print("Deleting runner", runner)
delete_runner(access_token, runner)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Get list of runners and their states')
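The new get_dead_runners_in_ec2 treats each offline, non-busy runner's name as an EC2 instance id and declares it dead only when describe_instance_status does not report that instance as pending or running. A small offline exercise of that behaviour, assuming the file above is importable as metrics_lambda (a hypothetical module name) and with boto3 stubbed out so no AWS credentials are needed:

# Hypothetical offline exercise; only get_dead_runners_in_ec2 and the
# RunnerDescription shape come from the diff above, the rest is scaffolding.
from collections import namedtuple
from unittest import mock

import metrics_lambda  # hypothetical module name for the lambda file above

RunnerDescription = namedtuple("RunnerDescription", ["id", "name", "tags", "offline", "busy"])

runners = [
    RunnerDescription(1, "i-alive", [], True, False),  # offline, but EC2 still runs it
    RunnerDescription(2, "i-gone", [], True, False),   # offline and missing from EC2
    RunnerDescription(3, "i-busy", [], False, True),   # busy runners are never candidates
]

fake_ec2 = mock.Mock()
fake_ec2.describe_instance_status.return_value = {
    "InstanceStatuses": [
        {"InstanceId": "i-alive", "InstanceState": {"Name": "running"}},
    ]
}

with mock.patch.object(metrics_lambda, "boto3") as fake_boto3:
    fake_boto3.client.return_value = fake_ec2
    dead = metrics_lambda.get_dead_runners_in_ec2(runners)

assert [runner.name for runner in dead] == ["i-gone"]  # only the vanished instance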


@@ -15,6 +15,7 @@ from upload_result_helper import upload_results
from commit_status_helper import get_commit
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
NAME = 'PVS Studio (actions)'
LICENCE_NAME = 'Free license: ClickHouse, Yandex'
@@ -49,6 +50,10 @@ if __name__ == "__main__":
logging.info("Repo copy path %s", repo_path)
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
images_path = os.path.join(temp_path, 'changed_images.json')
docker_image = 'clickhouse/pvs-test'

tests/ci/rerun_helper.py (new file, 35 lines)

@@ -0,0 +1,35 @@
#!/usr/bin/env python3
from commit_status_helper import get_commit
def _filter_statuses(statuses):
"""
Squash statuses to latest state
1. context="first", state="success", update_time=1
2. context="second", state="success", update_time=2
3. context="first", stat="failure", update_time=3
=========>
1. context="second", state="success"
2. context="first", stat="failure"
"""
filt = {}
for status in sorted(statuses, key=lambda x: x.updated_at):
filt[status.context] = status
return filt.values()
class RerunHelper:
def __init__(self, gh, pr_info, check_name):
self.gh = gh
self.pr_info = pr_info
self.check_name = check_name
self.pygh_commit = get_commit(gh, self.pr_info.sha)
self.statuses = _filter_statuses(self.pygh_commit.get_statuses())
def is_already_finished_by_status(self):
# currently we treat even failed statuses as finished
for status in self.statuses:
if self.check_name in status.context and status.state in ('success', 'failure'):
return True
return False
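To see the squash described in the docstring in action, one can feed _filter_statuses a few fake statuses and check that only the most recent state per context survives; the Status namedtuple below is a stand-in for PyGithub's commit status objects, of which only context, state and updated_at are used:

# Hypothetical check of the squash behaviour, assuming _filter_statuses is in scope.
from collections import namedtuple

Status = namedtuple("Status", ["context", "state", "updated_at"])

statuses = [
    Status("first", "success", 1),
    Status("second", "success", 2),
    Status("first", "failure", 3),  # supersedes the earlier "first" entry
]

latest = {(s.context, s.state) for s in _filter_statuses(statuses)}
assert latest == {("second", "success"), ("first", "failure")}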


@@ -3,6 +3,7 @@
import os
import logging
import subprocess
import sys
from github import Github
@@ -15,6 +16,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
DOCKER_IMAGE = "clickhouse/split-build-smoke-test"
@@ -66,6 +68,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, CHECK_NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
for root, _, files in os.walk(reports_path):
for f in files:
if f == 'changed_images.json':


@@ -17,6 +17,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
def get_run_command(build_path, result_folder, server_log_folder, image):
@@ -80,6 +81,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(reports_path, 'clickhouse/stress-test')
packages_path = os.path.join(temp_path, "packages")


@@ -3,6 +3,8 @@ import logging
import subprocess
import os
import csv
import sys
from github import Github
from s3_helper import S3Helper
from pr_info import PRInfo, get_event
@@ -12,7 +14,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
NAME = "Style Check (actions)"
@@ -56,11 +58,16 @@ if __name__ == "__main__":
pr_info = PRInfo(get_event())
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, NAME)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
gh = Github(get_best_robot_token())
docker_image = get_image_with_version(temp_path, 'clickhouse/style-test')
s3_helper = S3Helper('https://s3.amazonaws.com')


@@ -139,7 +139,7 @@ def delete_runner(access_token, runner):
response = requests.delete(f"https://api.github.com/orgs/ClickHouse/actions/runners/{runner.id}", headers=headers)
response.raise_for_status()
print(f"Response code deleting {runner.name} is {response.status_code}")
print(f"Response code deleting {runner.name} with id {runner.id} is {response.status_code}")
return response.status_code == 204
@@ -197,7 +197,7 @@ def main(github_secret_key, github_app_id, event):
print("Going to delete runners:", ', '.join([runner.name for runner in to_delete_runners]))
for runner in to_delete_runners:
if delete_runner(access_token, runner):
print(f"Runner {runner.name} successfuly deleted from github")
print(f"Runner with name {runner.name} and id {runner.id} successfuly deleted from github")
instances_to_kill.append(runner.name)
else:
print(f"Cannot delete {runner.name} from github")


@@ -16,6 +16,7 @@ from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status
from clickhouse_helper import ClickHouseHelper, mark_flaky_tests, prepare_tests_results_for_clickhouse
from stopwatch import Stopwatch
from rerun_helper import RerunHelper
IMAGE_NAME = 'clickhouse/unit-test'
@@ -105,6 +106,11 @@ if __name__ == "__main__":
gh = Github(get_best_robot_token())
rerun_helper = RerunHelper(gh, pr_info, check_name)
if rerun_helper.is_already_finished_by_status():
logging.info("Check is already finished according to github status, exiting")
sys.exit(0)
docker_image = get_image_with_version(reports_path, IMAGE_NAME)
download_unit_tests(check_name, reports_path, temp_path)


@@ -0,0 +1 @@
#!/usr/bin/env python3


@@ -0,0 +1,22 @@
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>


@@ -0,0 +1,119 @@
#!/usr/bin/env python3
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/enable_keeper1.xml'], stay_alive=True)
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
DUPLICATE_ID_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
<server>
<id>1</id>
<hostname>node2</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""
DUPLICATE_ENDPOINT_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
<server>
<id>2</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""
NORMAL_CONFIG = """
<clickhouse>
<keeper_server>
<tcp_port>9181</tcp_port>
<server_id>1</server_id>
<log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
<snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>
<coordination_settings>
<operation_timeout_ms>5000</operation_timeout_ms>
<session_timeout_ms>10000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>node1</hostname>
<port>9234</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>
"""
def test_duplicate_endpoint(started_cluster):
node1.stop_clickhouse()
node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ENDPOINT_CONFIG)
with pytest.raises(Exception):
node1.start_clickhouse(start_wait_sec=10)
node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", DUPLICATE_ID_CONFIG)
with pytest.raises(Exception):
node1.start_clickhouse(start_wait_sec=10)
node1.replace_config("/etc/clickhouse-server/config.d/enable_keeper1.xml", NORMAL_CONFIG)
node1.start_clickhouse()
assert node1.query("SELECT 1") == "1\n"


@@ -238,6 +238,8 @@ def create_table_like_with_materialize_mysql_database(clickhouse_node, mysql_nod
clickhouse_node.query(
f"CREATE DATABASE create_like ENGINE = MaterializeMySQL('{service_name}:3306', 'create_like', 'root', 'clickhouse')")
mysql_node.query("CREATE TABLE create_like.t2 LIKE create_like.t1")
check_query(clickhouse_node, "SHOW TABLES FROM create_like", "t1\nt2\n")
mysql_node.query("USE create_like")
mysql_node.query("CREATE TABLE t3 LIKE create_like2.t1")
mysql_node.query("CREATE TABLE t4 LIKE t1")


@@ -1,6 +1,6 @@
---
title: 'ClickHouse v21.11 Released'
image: 'https://blog-images.clickhouse.com/en/2021/clickhouse-v21-11/featured.jpg'
image: 'https://blog-images.clickhouse.com/en/2021/clickhouse-v21-11/featured-dog.jpg'
date: '2021-11-11'
author: '[Rich Raposa](https://github.com/rfraposa), [Alexey Milovidov](https://github.com/alexey-milovidov)'
tags: ['company', 'community']