Merge branch 'master' into disable-zero-copy-replication

Author: Alexey Milovidov
Date: 2022-08-14 10:40:16 +03:00 (committed via GitHub)
Commit: bada9ebefa
9 changed files with 41 additions and 23 deletions


@@ -1,4 +1,4 @@
-[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/ClickHouse/raw/master/website/images/logo-400x240.png)](https://clickhouse.com)
+[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.


@@ -18,6 +18,14 @@ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} --gcc-toolchain=${TOOLCHAIN_PATH}")
+set (CMAKE_EXE_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+set (CMAKE_SHARED_LINKER_FLAGS_INIT "-fuse-ld=bfd")
+# Currently, lld does not work with the error:
+# ld.lld: error: section size decrease is too large
+# But GNU BinUtils work.
+set (LINKER_NAME "riscv64-linux-gnu-ld.bfd" CACHE STRING "Linker name" FORCE)
set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
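The two HAS_PRE_1970_EXITCODE entries above follow a standard cross-compiling idiom: CMake's try_run() cannot execute binaries built for the target architecture, so the toolchain file pre-seeds the result variables in the cache. A minimal sketch of the mechanism, with a hypothetical check name (only the __TRYRUN_OUTPUT suffix convention is CMake's own):

    # Pre-seed the answers so configure never tries to run a RISC-V binary on the build host.
    set (MY_CHECK_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
    set (MY_CHECK_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
    # When cross-compiling, try_run() reads the cached exit code instead of executing the test:
    try_run (MY_CHECK_EXITCODE MY_CHECK_COMPILES
             ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR}/cmake/my_check.c
             RUN_OUTPUT_VARIABLE MY_CHECK_OUTPUT)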


@@ -41,6 +41,9 @@ RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \
&& apt-get install gcc-11 g++-11 --yes \
&& apt-get clean
+# A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work):
+RUN apt-get install --yes binutils-riscv64-linux-gnu
# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.16.0
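This hunk is in the build image: binutils-riscv64-linux-gnu ships the riscv64-linux-gnu-ld.bfd binary that the toolchain file selects through LINKER_NAME, so -fuse-ld=bfd can resolve it during a cross-build. A hypothetical smoke test for the image (not part of this commit):

    # Verify the cross-linker that LINKER_NAME points to is actually on PATH:
    RUN riscv64-linux-gnu-ld.bfd --version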


@@ -65,12 +65,13 @@ public:
bool randomize_, size_t max_iterations_, double max_time_,
const String & json_path_, size_t confidence_,
const String & query_id_, const String & query_to_execute_, bool continue_on_errors_,
-bool reconnect_, bool print_stacktrace_, const Settings & settings_)
+bool reconnect_, bool display_client_side_time_, bool print_stacktrace_, const Settings & settings_)
:
round_robin(round_robin_), concurrency(concurrency_), delay(delay_), queue(concurrency), randomize(randomize_),
cumulative(cumulative_), max_iterations(max_iterations_), max_time(max_time_),
json_path(json_path_), confidence(confidence_), query_id(query_id_),
query_to_execute(query_to_execute_), continue_on_errors(continue_on_errors_), reconnect(reconnect_),
+display_client_side_time(display_client_side_time_),
print_stacktrace(print_stacktrace_), settings(settings_),
shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())),
pool(concurrency)
@@ -166,6 +167,7 @@ private:
String query_to_execute;
bool continue_on_errors;
bool reconnect;
+bool display_client_side_time;
bool print_stacktrace;
const Settings & settings;
SharedContextHolder shared_context;
@@ -408,8 +410,8 @@ private:
true /*check embedded stack trace*/) << std::endl;
size_t info_index = round_robin ? 0 : connection_index;
-comparison_info_per_interval[info_index]->errors++;
-comparison_info_total[info_index]->errors++;
+++comparison_info_per_interval[info_index]->errors;
+++comparison_info_total[info_index]->errors;
}
}
// Count failed queries toward executed, so that we'd reach
@@ -443,7 +445,9 @@ private:
executor.finish();
-double seconds = watch.elapsedSeconds();
+double seconds = (display_client_side_time || progress.elapsed_ns == 0)
+    ? watch.elapsedSeconds()
+    : progress.elapsed_ns / 1e9;
std::lock_guard lock(mutex);
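The replacement above changes what "seconds" means: instead of always using the client-side stopwatch, clickhouse-benchmark now prefers the elapsed time reported by the server and falls back to the stopwatch when the user passed --client-side-time or when the server sent no timing (servers before 22.8 leave elapsed_ns at 0). A self-contained sketch of the same decision, with simplified stand-in types:

    #include <cstdint>

    struct Progress { uint64_t elapsed_ns = 0; };  // stays 0 when the server never reported timing

    double chooseSeconds(bool display_client_side_time, const Progress & progress, double stopwatch_seconds)
    {
        // Fall back to client-side wall-clock time if asked to, or if the server is too old to report it.
        if (display_client_side_time || progress.elapsed_ns == 0)
            return stopwatch_seconds;
        return progress.elapsed_ns / 1e9;  // nanoseconds to seconds
    }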
@@ -630,22 +634,23 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
("stage", value<std::string>()->default_value("complete"), "request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation,with_mergeable_state_after_aggregation_and_limit")
("iterations,i", value<size_t>()->default_value(0), "amount of queries to be executed")
("timelimit,t", value<double>()->default_value(0.), "stop launch of queries after specified time limit")
-("randomize,r", value<bool>()->default_value(false), "randomize order of execution")
+("randomize,r", "randomize order of execution")
("json", value<std::string>()->default_value(""), "write final report to specified file in JSON format")
("host,h", value<Strings>()->multitoken(), "list of hosts")
("port", value<Ports>()->multitoken(), "list of ports")
("roundrobin", "Instead of comparing queries for different --host/--port just pick one random --host/--port for every query and send query to it.")
("cumulative", "prints cumulative data instead of data per interval")
("secure,s", "Use TLS connection")
("user,u", value<std::string>()->default_value(env_user_str.value_or("default")), "")
("password", value<std::string>()->default_value(env_password_str.value_or("")), "")
("quota_key", value<std::string>()->default_value(env_quota_key_str.value_or("")), "")
("database", value<std::string>()->default_value("default"), "")
("stacktrace", "print stack traces of exceptions")
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)]")
("query_id", value<std::string>()->default_value(""), "")
("continue_on_errors", "continue testing even if a query fails")
("reconnect", "establish new connection for every query")
+("client-side-time", "display the time including network communication instead of server-side time; note that for server versions before 22.8 we always display client-side time")
;
Settings settings;
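The --randomize change above turns a value<bool> option into a plain switch; whether the flag was given is then read with options.count(...) rather than .as<bool>(). A minimal, self-contained boost::program_options sketch of the same pattern (option names borrowed from the hunk, the rest illustrative):

    #include <boost/program_options.hpp>
    #include <cstddef>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, char ** argv)
    {
        po::options_description desc("Options");
        desc.add_options()
            ("randomize,r", "randomize order of execution")  // a switch: present or absent, no value
            ("iterations,i", po::value<std::size_t>()->default_value(0), "amount of queries to be executed");

        po::variables_map options;
        po::store(po::parse_command_line(argc, argv, desc), options);
        po::notify(options);

        // For a switch, presence is queried with count(); there is no value to read with .as<bool>().
        bool randomize = options.count("randomize") > 0;
        std::cout << "randomize=" << randomize << " iterations=" << options["iterations"].as<std::size_t>() << '\n';
    }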
@@ -690,7 +695,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["password"].as<std::string>(),
options["quota_key"].as<std::string>(),
options["stage"].as<std::string>(),
-options["randomize"].as<bool>(),
+options.count("randomize"),
options["iterations"].as<size_t>(),
options["timelimit"].as<double>(),
options["json"].as<std::string>(),
@@ -699,6 +704,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["query"].as<std::string>(),
options.count("continue_on_errors"),
options.count("reconnect"),
+options.count("client-side-time"),
print_stacktrace,
settings);
return benchmark.run();


@@ -167,8 +167,8 @@ private:
validateFunctionArgumentTypes(*this, arguments,
FunctionArgumentDescriptors{
{"mode", &isStringOrFixedString<IDataType>, isColumnConst, "encryption mode string"},
-{"input", &isStringOrFixedString<IDataType>, nullptr, "plaintext"},
-{"key", &isStringOrFixedString<IDataType>, nullptr, "encryption key binary string"},
+{"input", &isStringOrFixedString<IDataType>, {}, "plaintext"},
+{"key", &isStringOrFixedString<IDataType>, {}, "encryption key binary string"},
},
optional_args
);
@@ -439,8 +439,8 @@ private:
validateFunctionArgumentTypes(*this, arguments,
FunctionArgumentDescriptors{
{"mode", &isStringOrFixedString<IDataType>, isColumnConst, "decryption mode string"},
-{"input", nullptr, nullptr, "ciphertext"},
-{"key", &isStringOrFixedString<IDataType>, nullptr, "decryption key binary string"},
+{"input", &isStringOrFixedString<IDataType>, {}, "ciphertext"},
+{"key", &isStringOrFixedString<IDataType>, {}, "decryption key binary string"},
},
optional_args
);
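Both hunks replace nullptr with {} in the argument descriptors, and the decrypt overload's "input" argument gains the same string-type validator that encrypt already had, so a non-string ciphertext now fails argument validation. A simplified sketch of how such descriptors behave (the struct and function here are illustrative stand-ins, not ClickHouse's actual definitions); {} value-initializes a function pointer to null, so it is equivalent to nullptr while keeping the initializer lists uniform:

    #include <stdexcept>
    #include <string>
    #include <vector>

    using TypeValidator = bool (*)(const std::string & type_name);

    struct ArgumentDescriptor
    {
        const char * name;
        TypeValidator validator;   // {} and nullptr both mean "accept any type"
        const char * expected;
    };

    bool isStringLike(const std::string & t) { return t == "String" || t == "FixedString"; }

    void validateArguments(const std::vector<std::string> & arg_types, const std::vector<ArgumentDescriptor> & descriptors)
    {
        for (size_t i = 0; i < arg_types.size() && i < descriptors.size(); ++i)
            if (descriptors[i].validator && !descriptors[i].validator(arg_types[i]))
                throw std::invalid_argument(std::string(descriptors[i].name) + " must be " + descriptors[i].expected);
    }

    // validateArguments({"Array(UInt64)"}, {{"input", isStringLike, "a string"}}) throws,
    // mirroring the new behaviour of decrypt's type check.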


@@ -33,6 +33,8 @@ def get_run_command(
        "docker run --cap-add=SYS_PTRACE "
        # a static link, don't use S3_URL or S3_DOWNLOAD
        "-e S3_URL='https://s3.amazonaws.com/clickhouse-datasets' "
+       # For dmesg
+       "--cap-add syslog "
        f"--volume={build_path}:/package_folder "
        f"--volume={result_folder}:/test_output "
        f"--volume={repo_tests_path}:/usr/share/clickhouse-test "


@@ -1759,10 +1759,7 @@ def main(args):
        stop_time = time() + args.global_time_limit
    if args.zookeeper is None:
-        try:
-            args.zookeeper = int(extract_key(" --key zookeeper | grep . | wc -l")) > 0
-        except ValueError:
-            args.zookeeper = False
+        args.zookeeper = True
    if args.shard is None:
        args.shard = bool(extract_key(' --key listen_host | grep -E "127.0.0.2|::"'))
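The --zookeeper flag follows a tri-state pattern: None means "not specified on the command line", and main() fills in the default; after this hunk the default is simply "assume ZooKeeper is configured", replacing the old probe of the server config. A minimal sketch of the pattern (only the option name comes from the script, the argparse wiring is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    # Absent -> None (decide later); passing --zookeeper forces True.
    parser.add_argument("--zookeeper", action="store_true", default=None)
    args = parser.parse_args([])

    if args.zookeeper is None:
        args.zookeeper = True  # every test environment is now expected to provide ZooKeeper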


@@ -0,0 +1,2 @@
+-- Tags: no-fasttest
+SELECT decrypt('aes-128-gcm', [1024, 65535, NULL, NULL, 9223372036854775807, 1048576, NULL], 'text', 'key', 'IV'); -- { serverError 43 }
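serverError 43 is ILLEGAL_TYPE_OF_ARGUMENT: with the validator added to decrypt's "input" argument above, the Array literal is rejected during type checking instead of reaching the cipher code. For contrast, a well-formed round trip might look like the following (a sketch, not part of the commit; aes-128-gcm expects a 16-byte key, and a 12-byte IV is conventional for GCM):

    -- Tags: no-fasttest
    SELECT decrypt('aes-128-gcm',
                   encrypt('aes-128-gcm', 'text', '0123456789abcdef', 'iv_iv_iv_iv_'),
                   '0123456789abcdef', 'iv_iv_iv_iv_');  -- returns 'text'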