Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit 140075da69: Merge branch 'master' into patch-1
@@ -64,7 +64,7 @@ The supported formats are:
 | [Null](#null) | ✗ | ✔ |
 | [XML](#xml) | ✗ | ✔ |
 | [CapnProto](#capnproto) | ✔ | ✔ |
-| [LineAsString](#lineasstring) | ✔ | ✗ |
+| [LineAsString](#lineasstring) | ✔ | ✔ |
 | [Regexp](#data-format-regexp) | ✔ | ✗ |
 | [RawBLOB](#rawblob) | ✔ | ✔ |
 | [MsgPack](#msgpack) | ✔ | ✔ |
@@ -1,6 +1,5 @@
 set (CLICKHOUSE_CLIENT_SOURCES
     Client.cpp
-    TestTags.cpp
 )

 set (CLICKHOUSE_CLIENT_LINK
@@ -21,7 +21,6 @@
 #include <Common/Config/configReadClient.h>

 #include <Core/QueryProcessingStage.h>
-#include <Client/TestHint.h>
 #include <Columns/ColumnString.h>
 #include <Poco/Util/Application.h>

@@ -43,7 +42,6 @@
 #include <Functions/registerFunctions.h>
 #include <AggregateFunctions/registerAggregateFunctions.h>
 #include <Formats/registerFormats.h>
-#include "TestTags.h"

 #ifndef __clang__
 #pragma GCC optimize("-fno-var-tracking-assignments")
@@ -102,212 +100,6 @@ void Client::processError(const String & query) const
 }


-bool Client::executeMultiQuery(const String & all_queries_text)
-{
-    // It makes sense not to base any control flow on this, so that it is
-    // the same in tests and in normal usage. The only difference is that in
-    // normal mode we ignore the test hints.
-    const bool test_mode = config().has("testmode");
-    if (test_mode)
-    {
-        /// disable logs if expects errors
-        TestHint test_hint(test_mode, all_queries_text);
-        if (test_hint.clientError() || test_hint.serverError())
-            processTextAsSingleQuery("SET send_logs_level = 'fatal'");
-    }
-
-    bool echo_query = echo_queries;
-
-    /// Test tags are started with "--" so they are interpreted as comments anyway.
-    /// But if the echo is enabled we have to remove the test tags from `all_queries_text`
-    /// because we don't want test tags to be echoed.
-    size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
-
-    /// Several queries separated by ';'.
-    /// INSERT data is ended by the end of line, not ';'.
-    /// An exception is VALUES format where we also support semicolon in
-    /// addition to end of line.
-    const char * this_query_begin = all_queries_text.data() + test_tags_length;
-    const char * this_query_end;
-    const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
-
-    String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
-    String query_to_execute;
-    ASTPtr parsed_query;
-    std::optional<Exception> current_exception;
-
-    while (true)
-    {
-        auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
-                                           query_to_execute, parsed_query, all_queries_text, current_exception);
-        switch (stage)
-        {
-            case MultiQueryProcessingStage::QUERIES_END:
-            case MultiQueryProcessingStage::PARSING_FAILED:
-            {
-                return true;
-            }
-            case MultiQueryProcessingStage::CONTINUE_PARSING:
-            {
-                continue;
-            }
-            case MultiQueryProcessingStage::PARSING_EXCEPTION:
-            {
-                this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end);
-
-                // Try to find test hint for syntax error. We don't know where
-                // the query ends because we failed to parse it, so we consume
-                // the entire line.
-                TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin));
-                if (hint.serverError())
-                {
-                    // Syntax errors are considered as client errors
-                    current_exception->addMessage("\nExpected server error '{}'.", hint.serverError());
-                    current_exception->rethrow();
-                }
-
-                if (hint.clientError() != current_exception->code())
-                {
-                    if (hint.clientError())
-                        current_exception->addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
-                    current_exception->rethrow();
-                }
-
-                /// It's expected syntax error, skip the line
-                this_query_begin = this_query_end;
-                current_exception.reset();
-
-                continue;
-            }
-            case MultiQueryProcessingStage::EXECUTE_QUERY:
-            {
-                full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
-                if (query_fuzzer_runs)
-                {
-                    if (!processWithFuzzing(full_query))
-                        return false;
-                    this_query_begin = this_query_end;
-                    continue;
-                }
-
-                // Now we know for sure where the query ends.
-                // Look for the hint in the text of query + insert data + trailing
-                // comments,
-                // e.g. insert into t format CSV 'a' -- { serverError 123 }.
-                // Use the updated query boundaries we just calculated.
-                TestHint test_hint(test_mode, full_query);
-                // Echo all queries if asked; makes for a more readable reference
-                // file.
-                echo_query = test_hint.echoQueries().value_or(echo_query);
-                try
-                {
-                    processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
-                }
-                catch (...)
-                {
-                    // Surprisingly, this is a client error. A server error would
-                    // have been reported w/o throwing (see onReceiveSeverException()).
-                    client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
-                    have_error = true;
-                }
-                // Check whether the error (or its absence) matches the test hints
-                // (or their absence).
-                bool error_matches_hint = true;
-                if (have_error)
-                {
-                    if (test_hint.serverError())
-                    {
-                        if (!server_exception)
-                        {
-                            error_matches_hint = false;
-                            fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
-                                       test_hint.serverError(), full_query);
-                        }
-                        else if (server_exception->code() != test_hint.serverError())
-                        {
-                            error_matches_hint = false;
-                            fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
-                                       test_hint.serverError(), server_exception->code(), full_query);
-                        }
-                    }
-                    if (test_hint.clientError())
-                    {
-                        if (!client_exception)
-                        {
-                            error_matches_hint = false;
-                            fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
-                                       test_hint.clientError(), full_query);
-                        }
-                        else if (client_exception->code() != test_hint.clientError())
-                        {
-                            error_matches_hint = false;
-                            fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
-                                       test_hint.clientError(), client_exception->code(), full_query);
-                        }
-                    }
-                    if (!test_hint.clientError() && !test_hint.serverError())
-                    {
-                        // No error was expected but it still occurred. This is the
-                        // default case w/o test hint, doesn't need additional
-                        // diagnostics.
-                        error_matches_hint = false;
-                    }
-                }
-                else
-                {
-                    if (test_hint.clientError())
-                    {
-                        fmt::print(stderr, "The query succeeded but the client error '{}' was expected (query: {}).\n",
-                                   test_hint.clientError(), full_query);
-                        error_matches_hint = false;
-                    }
-                    if (test_hint.serverError())
-                    {
-                        fmt::print(stderr, "The query succeeded but the server error '{}' was expected (query: {}).\n",
-                                   test_hint.serverError(), full_query);
-                        error_matches_hint = false;
-                    }
-                }
-                // If the error is expected, force reconnect and ignore it.
-                if (have_error && error_matches_hint)
-                {
-                    client_exception.reset();
-                    server_exception.reset();
-                    have_error = false;
-
-                    if (!connection->checkConnected())
-                        connect();
-                }
-
-                // For INSERTs with inline data: use the end of inline data as
-                // reported by the format parser (it is saved in sendData()).
-                // This allows us to handle queries like:
-                // insert into t values (1); select 1
-                // , where the inline data is delimited by semicolon and not by a
-                // newline.
-                auto * insert_ast = parsed_query->as<ASTInsertQuery>();
-                if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
-                {
-                    this_query_end = insert_ast->end;
-                    adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
-                }
-
-                // Report error.
-                if (have_error)
-                    processError(full_query);
-
-                // Stop processing queries if needed.
-                if (have_error && !ignore_error)
-                    return is_interactive;
-
-                this_query_begin = this_query_end;
-                break;
-            }
-        }
-    }
-}
-
-
 /// Make query to get all server warnings
 std::vector<String> Client::loadWarningMessages()
 {
@@ -496,7 +288,7 @@ void Client::connect()
     if (hosts_and_ports.empty())
     {
         String host = config().getString("host", "localhost");
-        UInt16 port = static_cast<UInt16>(ConnectionParameters::getPortFromConfig(config()));
+        UInt16 port = ConnectionParameters::getPortFromConfig(config());
         hosts_and_ports.emplace_back(HostAndPort{host, port});
     }

@@ -1015,7 +807,6 @@ void Client::addOptions(OptionsDescription & options_description)
         ("password", po::value<std::string>()->implicit_value("\n", ""), "password")
         ("ask-password", "ask-password")
         ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
-        ("testmode,T", "enable test hints in comments")

         ("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
         ("compression", po::value<bool>(), "enable or disable compression")
@@ -1058,7 +849,7 @@ void Client::addOptions(OptionsDescription & options_description)
         "Example of usage: '--host host1 --host host2 --port port2 --host host3 ...'"
         "Each '--port port' will be attached to the last seen host that doesn't have a port yet,"
         "if there is no such host, the port will be attached to the next first host or to default host.")
-        ("port", po::value<UInt16>()->default_value(DBMS_DEFAULT_PORT), "server ports")
+        ("port", po::value<UInt16>(), "server ports")
    ;
}

@@ -1106,8 +897,11 @@ void Client::processOptions(const OptionsDescription & options_description,
             = po::command_line_parser(hosts_and_ports_argument).options(options_description.hosts_and_ports_description.value()).run();
         po::variables_map host_and_port_options;
         po::store(parsed_hosts_and_ports, host_and_port_options);
-        hosts_and_ports.emplace_back(
-            HostAndPort{host_and_port_options["host"].as<std::string>(), host_and_port_options["port"].as<UInt16>()});
+        std::string host = host_and_port_options["host"].as<std::string>();
+        std::optional<UInt16> port = !host_and_port_options["port"].empty()
+            ? std::make_optional(host_and_port_options["port"].as<UInt16>())
+            : std::nullopt;
+        hosts_and_ports.emplace_back(HostAndPort{host, port});
    }

    send_external_tables = true;
@@ -1151,8 +945,6 @@ void Client::processOptions(const OptionsDescription & options_description,
        config().setBool("ask-password", true);
    if (options.count("quota_key"))
        config().setString("quota_key", options["quota_key"].as<std::string>());
-    if (options.count("testmode"))
-        config().setBool("testmode", true);
    if (options.count("max_client_network_bandwidth"))
        max_client_network_bandwidth = options["max_client_network_bandwidth"].as<int>();
    if (options.count("compression"))
@@ -16,20 +16,24 @@ public:
    int main(const std::vector<String> & /*args*/) override;

protected:
-    bool executeMultiQuery(const String & all_queries_text) override;
    bool processWithFuzzing(const String & full_query) override;

    void connect() override;

    void processError(const String & query) const override;

    String getName() const override { return "client"; }

    void printHelpMessage(const OptionsDescription & options_description) override;

    void addOptions(OptionsDescription & options_description) override;

    void processOptions(
        const OptionsDescription & options_description,
        const CommandLineOptions & options,
        const std::vector<Arguments> & external_tables_arguments,
        const std::vector<Arguments> & hosts_and_ports_arguments) override;

    void processConfig() override;

private:
@@ -92,92 +92,6 @@ void LocalServer::processError(const String &) const
 }


-bool LocalServer::executeMultiQuery(const String & all_queries_text)
-{
-    bool echo_query = echo_queries;
-
-    /// Several queries separated by ';'.
-    /// INSERT data is ended by the end of line, not ';'.
-    /// An exception is VALUES format where we also support semicolon in
-    /// addition to end of line.
-    const char * this_query_begin = all_queries_text.data();
-    const char * this_query_end;
-    const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
-
-    String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
-    String query_to_execute;
-    ASTPtr parsed_query;
-    std::optional<Exception> current_exception;
-
-    while (true)
-    {
-        auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
-                                           query_to_execute, parsed_query, all_queries_text, current_exception);
-        switch (stage)
-        {
-            case MultiQueryProcessingStage::QUERIES_END:
-            case MultiQueryProcessingStage::PARSING_FAILED:
-            {
-                return true;
-            }
-            case MultiQueryProcessingStage::CONTINUE_PARSING:
-            {
-                continue;
-            }
-            case MultiQueryProcessingStage::PARSING_EXCEPTION:
-            {
-                if (current_exception)
-                    current_exception->rethrow();
-                return true;
-            }
-            case MultiQueryProcessingStage::EXECUTE_QUERY:
-            {
-                full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
-
-                try
-                {
-                    processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
-                }
-                catch (...)
-                {
-                    if (!is_interactive && !ignore_error)
-                        throw;
-
-                    // Surprisingly, this is a client error. A server error would
-                    // have been reported w/o throwing (see onReceiveSeverException()).
-                    client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
-                    have_error = true;
-                }
-
-                // For INSERTs with inline data: use the end of inline data as
-                // reported by the format parser (it is saved in sendData()).
-                // This allows us to handle queries like:
-                // insert into t values (1); select 1
-                // , where the inline data is delimited by semicolon and not by a
-                // newline.
-                auto * insert_ast = parsed_query->as<ASTInsertQuery>();
-                if (insert_ast && insert_ast->data)
-                {
-                    this_query_end = insert_ast->end;
-                    adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
-                }
-
-                // Report error.
-                if (have_error)
-                    processError(full_query);
-
-                // Stop processing queries if needed.
-                if (have_error && !ignore_error)
-                    return is_interactive;
-
-                this_query_begin = this_query_end;
-                break;
-            }
-        }
-    }
-}
-
-
 void LocalServer::initialize(Poco::Util::Application & self)
 {
     Poco::Util::Application::initialize(self);

@@ -31,17 +31,19 @@ public:
    int main(const std::vector<String> & /*args*/) override;

protected:
-    bool executeMultiQuery(const String & all_queries_text) override;
-
    void connect() override;

    void processError(const String & query) const override;

    String getName() const override { return "local"; }

    void printHelpMessage(const OptionsDescription & options_description) override;

    void addOptions(OptionsDescription & options_description) override;

    void processOptions(const OptionsDescription & options_description, const CommandLineOptions & options,
                        const std::vector<Arguments> &, const std::vector<Arguments> &) override;

    void processConfig() override;

private:
@@ -575,8 +575,8 @@
        const formatted_rows = formatReadableRows(rows);
        stats.innerText = `Elapsed: ${seconds} sec, read ${formatted_rows} rows, ${formatted_bytes}.`;

-        /// We can also render graphs if user performed EXPLAIN PIPELINE graph=1.
-        if (response.data.length > 3 && response.data[0][0] === "digraph" && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) {
+        /// We can also render graphs if user performed EXPLAIN PIPELINE graph=1 or EXPLAIN AST graph = 1
+        if (response.data.length > 3 && response.data[0][0].startsWith("digraph") && document.getElementById('query').value.match(/^\s*EXPLAIN/i)) {
            renderGraph(response);
        } else {
            renderTable(response);
@@ -36,6 +36,8 @@
 #include <Storages/ColumnsDescription.h>

 #include <Client/ClientBaseHelpers.h>
+#include <Client/TestHint.h>
+#include "TestTags.h"

 #include <Parsers/parseQuery.h>
 #include <Parsers/ParserQuery.h>
@@ -1483,6 +1485,219 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
 }


+bool ClientBase::executeMultiQuery(const String & all_queries_text)
+{
+    // It makes sense not to base any control flow on this, so that it is
+    // the same in tests and in normal usage. The only difference is that in
+    // normal mode we ignore the test hints.
+    const bool test_mode = config().has("testmode");
+    if (test_mode)
+    {
+        /// disable logs if expects errors
+        TestHint test_hint(test_mode, all_queries_text);
+        if (test_hint.clientError() || test_hint.serverError())
+            processTextAsSingleQuery("SET send_logs_level = 'fatal'");
+    }
+
+    bool echo_query = echo_queries;
+
+    /// Test tags are started with "--" so they are interpreted as comments anyway.
+    /// But if the echo is enabled we have to remove the test tags from `all_queries_text`
+    /// because we don't want test tags to be echoed.
+    size_t test_tags_length = test_mode ? getTestTagsLength(all_queries_text) : 0;
+
+    /// Several queries separated by ';'.
+    /// INSERT data is ended by the end of line, not ';'.
+    /// An exception is VALUES format where we also support semicolon in
+    /// addition to end of line.
+    const char * this_query_begin = all_queries_text.data() + test_tags_length;
+    const char * this_query_end;
+    const char * all_queries_end = all_queries_text.data() + all_queries_text.size();
+
+    String full_query; // full_query is the query + inline INSERT data + trailing comments (the latter is our best guess for now).
+    String query_to_execute;
+    ASTPtr parsed_query;
+    std::optional<Exception> current_exception;
+
+    while (true)
+    {
+        auto stage = analyzeMultiQueryText(this_query_begin, this_query_end, all_queries_end,
+                                           query_to_execute, parsed_query, all_queries_text, current_exception);
+        switch (stage)
+        {
+            case MultiQueryProcessingStage::QUERIES_END:
+            case MultiQueryProcessingStage::PARSING_FAILED:
+            {
+                return true;
+            }
+            case MultiQueryProcessingStage::CONTINUE_PARSING:
+            {
+                continue;
+            }
+            case MultiQueryProcessingStage::PARSING_EXCEPTION:
+            {
+                this_query_end = find_first_symbols<'\n'>(this_query_end, all_queries_end);
+
+                // Try to find test hint for syntax error. We don't know where
+                // the query ends because we failed to parse it, so we consume
+                // the entire line.
+                TestHint hint(test_mode, String(this_query_begin, this_query_end - this_query_begin));
+                if (hint.serverError())
+                {
+                    // Syntax errors are considered as client errors
+                    current_exception->addMessage("\nExpected server error '{}'.", hint.serverError());
+                    current_exception->rethrow();
+                }
+
+                if (hint.clientError() != current_exception->code())
+                {
+                    if (hint.clientError())
+                        current_exception->addMessage("\nExpected client error: " + std::to_string(hint.clientError()));
+
+                    current_exception->rethrow();
+                }
+
+                /// It's expected syntax error, skip the line
+                this_query_begin = this_query_end;
+                current_exception.reset();
+
+                continue;
+            }
+            case MultiQueryProcessingStage::EXECUTE_QUERY:
+            {
+                full_query = all_queries_text.substr(this_query_begin - all_queries_text.data(), this_query_end - this_query_begin);
+                if (query_fuzzer_runs)
+                {
+                    if (!processWithFuzzing(full_query))
+                        return false;
+
+                    this_query_begin = this_query_end;
+                    continue;
+                }
+
+                // Now we know for sure where the query ends.
+                // Look for the hint in the text of query + insert data + trailing
+                // comments, e.g. insert into t format CSV 'a' -- { serverError 123 }.
+                // Use the updated query boundaries we just calculated.
+                TestHint test_hint(test_mode, full_query);
+
+                // Echo all queries if asked; makes for a more readable reference file.
+                echo_query = test_hint.echoQueries().value_or(echo_query);
+
+                try
+                {
+                    processParsedSingleQuery(full_query, query_to_execute, parsed_query, echo_query, false);
+                }
+                catch (...)
+                {
+                    // Surprisingly, this is a client error. A server error would
+                    // have been reported w/o throwing (see onReceiveSeverException()).
+                    client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
+                    have_error = true;
+                }
+
+                // Check whether the error (or its absence) matches the test hints
+                // (or their absence).
+                bool error_matches_hint = true;
+                if (have_error)
+                {
+                    if (test_hint.serverError())
+                    {
+                        if (!server_exception)
+                        {
+                            error_matches_hint = false;
+                            fmt::print(stderr, "Expected server error code '{}' but got no server error (query: {}).\n",
+                                       test_hint.serverError(), full_query);
+                        }
+                        else if (server_exception->code() != test_hint.serverError())
+                        {
+                            error_matches_hint = false;
+                            fmt::print(stderr, "Expected server error code: {} but got: {} (query: {}).\n",
+                                       test_hint.serverError(), server_exception->code(), full_query);
+                        }
+                    }
+                    if (test_hint.clientError())
+                    {
+                        if (!client_exception)
+                        {
+                            error_matches_hint = false;
+                            fmt::print(stderr, "Expected client error code '{}' but got no client error (query: {}).\n",
+                                       test_hint.clientError(), full_query);
+                        }
+                        else if (client_exception->code() != test_hint.clientError())
+                        {
+                            error_matches_hint = false;
+                            fmt::print(stderr, "Expected client error code '{}' but got '{}' (query: {}).\n",
+                                       test_hint.clientError(), client_exception->code(), full_query);
+                        }
+                    }
+                    if (!test_hint.clientError() && !test_hint.serverError())
+                    {
+                        // No error was expected but it still occurred. This is the
+                        // default case w/o test hint, doesn't need additional
+                        // diagnostics.
+                        error_matches_hint = false;
+                    }
+                }
+                else
+                {
+                    if (test_hint.clientError())
+                    {
+                        error_matches_hint = false;
+                        fmt::print(stderr,
+                                   "The query succeeded but the client error '{}' was expected (query: {}).\n",
+                                   test_hint.clientError(), full_query);
+                    }
+                    if (test_hint.serverError())
+                    {
+                        error_matches_hint = false;
+                        fmt::print(stderr,
+                                   "The query succeeded but the server error '{}' was expected (query: {}).\n",
+                                   test_hint.serverError(), full_query);
+                    }
+                }
+
+                // If the error is expected, force reconnect and ignore it.
+                if (have_error && error_matches_hint)
+                {
+                    client_exception.reset();
+                    server_exception.reset();
+
+                    have_error = false;
+
+                    if (!connection->checkConnected())
+                        connect();
+                }
+
+                // For INSERTs with inline data: use the end of inline data as
+                // reported by the format parser (it is saved in sendData()).
+                // This allows us to handle queries like:
+                // insert into t values (1); select 1
+                // , where the inline data is delimited by semicolon and not by a
+                // newline.
+                auto * insert_ast = parsed_query->as<ASTInsertQuery>();
+                if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
+                {
+                    this_query_end = insert_ast->end;
+                    adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
+                }
+
+                // Report error.
+                if (have_error)
+                    processError(full_query);
+
+                // Stop processing queries if needed.
+                if (have_error && !ignore_error)
+                    return is_interactive;
+
+                this_query_begin = this_query_end;
+                break;
+            }
+        }
+    }
+}
+
+
 bool ClientBase::processQueryText(const String & text)
 {
     if (exit_strings.end() != exit_strings.find(trim(text, [](char c) { return isWhitespaceASCII(c) || c == ';'; })))
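The hunk above relies on test hints: a trailing comment such as `-- { serverError 123 }` tells the client which error a test expects. As a rough illustration of the parsing idea only (a standalone sketch, not the actual TestHint implementation; the function name and regex are assumptions), extracting an expected server error code from a query could look like this:

#include <cstdio>
#include <optional>
#include <regex>
#include <string>

// Sketch: pull an expected server error code out of a trailing
// "-- { serverError NNN }" hint, mirroring how the hints above are consumed.
std::optional<int> expectedServerError(const std::string & query)
{
    static const std::regex hint(R"(--\s*\{\s*serverError\s+(\d+)\s*\})");
    std::smatch m;
    if (std::regex_search(query, m, hint))
        return std::stoi(m[1].str());
    return std::nullopt;
}

int main()
{
    auto code = expectedServerError("SELECT n -- { serverError 47 }");
    std::printf("%d\n", code.value_or(0)); // prints 47
}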
@@ -1967,6 +2182,8 @@ void ClientBase::init(int argc, char ** argv)
        ("suggestion_limit", po::value<int>()->default_value(10000),
         "Suggestion limit for how many databases, tables and columns to fetch.")

+        ("testmode,T", "enable test hints in comments")
+
        ("format,f", po::value<std::string>(), "default output format")
        ("vertical,E", "vertical output format, same as --format=Vertical or FORMAT Vertical or \\G at end of command")
        ("highlight", po::value<bool>()->default_value(true), "enable or disable basic syntax highlight in interactive command line")
@@ -2072,6 +2289,8 @@ void ClientBase::init(int argc, char ** argv)
        config().setBool("interactive", true);
    if (options.count("pager"))
        config().setString("pager", options["pager"].as<std::string>());
+    if (options.count("testmode"))
+        config().setBool("testmode", true);

    if (options.count("log-level"))
        Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
@@ -61,7 +61,6 @@ protected:
        throw Exception("Query processing with fuzzing is not implemented", ErrorCodes::NOT_IMPLEMENTED);
    }

-    virtual bool executeMultiQuery(const String & all_queries_text) = 0;
    virtual void connect() = 0;
    virtual void processError(const String & query) const = 0;
    virtual String getName() const = 0;
@@ -77,6 +76,7 @@ protected:
    ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const;
    static void setupSignalHandler();

+    bool executeMultiQuery(const String & all_queries_text);
    MultiQueryProcessingStage analyzeMultiQueryText(
        const char *& this_query_begin, const char *& this_query_end, const char * all_queries_end,
        String & query_to_execute, ASTPtr & parsed_query, const String & all_queries_text,
@@ -255,7 +255,7 @@ protected:
    struct HostAndPort
    {
        String host;
-        UInt16 port;
+        std::optional<UInt16> port;
    };

    std::vector<HostAndPort> hosts_and_ports{};
@@ -25,7 +25,9 @@ namespace ErrorCodes

 ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfiguration & config,
    std::string connection_host,
-    int connection_port) : host(connection_host), port(connection_port)
+    std::optional<UInt16> connection_port)
+    : host(connection_host)
+    , port(connection_port.value_or(getPortFromConfig(config)))
 {
    bool is_secure = config.getBool("secure", false);
    security = is_secure ? Protocol::Secure::Enable : Protocol::Secure::Disable;
@@ -73,7 +75,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
 {
 }

-int ConnectionParameters::getPortFromConfig(const Poco::Util::AbstractConfiguration & config)
+UInt16 ConnectionParameters::getPortFromConfig(const Poco::Util::AbstractConfiguration & config)
 {
    bool is_secure = config.getBool("secure", false);
    return config.getInt("port",
@@ -23,10 +23,10 @@ struct ConnectionParameters
    ConnectionTimeouts timeouts;

    ConnectionParameters() = default;
-    explicit ConnectionParameters(const Poco::Util::AbstractConfiguration & config);
-    ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, int port);
+    ConnectionParameters(const Poco::Util::AbstractConfiguration & config);
+    ConnectionParameters(const Poco::Util::AbstractConfiguration & config, std::string host, std::optional<UInt16> port);

-    static int getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
+    static UInt16 getPortFromConfig(const Poco::Util::AbstractConfiguration & config);
 };

 }
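The port is now optional end to end: an explicitly given port wins, otherwise the constructor falls back to the configured one via `value_or`. A standalone sketch of that fallback rule (the helper names here are stand-ins, not the real API):

#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for ConnectionParameters::getPortFromConfig(config).
uint16_t portFromConfig() { return 9000; }

// An explicitly supplied port wins; otherwise use the configured default.
uint16_t resolvePort(std::optional<uint16_t> explicit_port)
{
    return explicit_port.value_or(portFromConfig());
}

int main()
{
    std::printf("%d\n", resolvePort(std::nullopt)); // 9000, from "config"
    std::printf("%d\n", resolvePort(uint16_t{9440})); // 9440, explicit wins
}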
@@ -167,6 +167,24 @@ SeekableReadBufferPtr CachedReadBufferFromRemoteFS::getReadBufferForFileSegment(
        }
        case FileSegment::State::DOWNLOADING:
        {
+            size_t download_offset = file_segment->getDownloadOffset();
+            bool can_start_from_cache = download_offset > file_offset_of_buffer_end;
+
+            /// If file segment is being downloaded but we can already read from already downloaded part, do that.
+            if (can_start_from_cache)
+            {
+                ///                      segment{k} state: DOWNLOADING
+                /// cache:           [______|___________
+                ///                         ^
+                ///                         download_offset (in progress)
+                /// requested_range:    [__________]
+                ///                     ^
+                ///                     file_offset_of_buffer_end
+
+                read_type = ReadType::CACHED;
+                return getCacheReadBuffer(range.left);
+            }
+
            if (wait_download_tries++ < wait_download_max_tries)
            {
                download_state = file_segment->wait();
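The diagram above encodes a single comparison: a reader may serve bytes from the already-downloaded prefix of a segment whenever the download offset has passed the reader's current position. A standalone sketch of that decision (names mirror the hunk, but this is not ClickHouse code):

#include <cstddef>
#include <cstdio>

// While a segment is DOWNLOADING, the already-downloaded prefix is readable
// if the downloader has advanced past the reader's position.
bool canStartFromCache(size_t download_offset, size_t file_offset_of_buffer_end)
{
    return download_offset > file_offset_of_buffer_end;
}

int main()
{
    std::printf("%d\n", canStartFromCache(4096, 1024)); // 1: prefix is ready
    std::printf("%d\n", canStartFromCache(1024, 4096)); // 0: must wait or read remotely
}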
@@ -476,6 +494,7 @@ bool CachedReadBufferFromRemoteFS::updateImplementationBufferIfNeeded()
    auto download_offset = file_segment->getDownloadOffset();
    if (download_offset == file_offset_of_buffer_end)
    {
+        /// TODO: makes sense to reuse local file reader if we return here with CACHED read type again?
        implementation_buffer = getImplementationBuffer(*current_file_segment_it);

        return true;
@@ -27,15 +27,19 @@ class FunctionGetMacro : public IFunction
 {
 private:
    MultiVersion<Macros>::Version macros;
+    bool is_distributed;

 public:
    static constexpr auto name = "getMacro";
    static FunctionPtr create(ContextPtr context)
    {
-        return std::make_shared<FunctionGetMacro>(context->getMacros());
+        return std::make_shared<FunctionGetMacro>(context->getMacros(), context->isDistributed());
    }

-    explicit FunctionGetMacro(MultiVersion<Macros>::Version macros_) : macros(std::move(macros_)) {}
+    explicit FunctionGetMacro(MultiVersion<Macros>::Version macros_, bool is_distributed_)
+        : macros(std::move(macros_)), is_distributed(is_distributed_)
+    {
+    }

    String getName() const override
    {
@@ -48,9 +52,12 @@ public:

    bool isDeterministicInScopeOfQuery() const override
    {
-        return false;
+        return true;
    }

+    /// getMacro may return different values on different shards/replicas, so it's not constant for distributed query
+    bool isSuitableForConstantFolding() const override { return !is_distributed; }
+
    size_t getNumberOfArguments() const override
    {
        return 1;
@@ -63,9 +70,6 @@ public:
        return std::make_shared<DataTypeString>();
    }

-    /** convertToFullColumn needed because in distributed query processing,
-      * each server returns its own value.
-      */
    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override
    {
        const IColumn * arg_column = arguments[0].column.get();
@@ -74,8 +78,7 @@ public:
        if (!arg_string)
            throw Exception("The argument of function " + getName() + " must be constant String", ErrorCodes::ILLEGAL_COLUMN);

-        return result_type->createColumnConst(
-            input_rows_count, macros->getValue(arg_string->getDataAt(0).toString()))->convertToFullColumnIfConst();
+        return result_type->createColumnConst(input_rows_count, macros->getValue(arg_string->getDataAt(0).toString()));
    }
 };

@@ -50,24 +50,29 @@ bool BrotliReadBuffer::nextImpl()
    if (eof_flag)
        return false;

-    if (!in_available)
+    do
    {
-        in->nextIfAtEnd();
-        in_available = in->buffer().end() - in->position();
-        in_data = reinterpret_cast<uint8_t *>(in->position());
-    }
+        if (!in_available)
+        {
+            in->nextIfAtEnd();
+            in_available = in->buffer().end() - in->position();
+            in_data = reinterpret_cast<uint8_t *>(in->position());
+        }

-    if (brotli->result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT && (!in_available || in->eof()))
-    {
-        throw Exception("brotli decode error", ErrorCodes::BROTLI_READ_FAILED);
-    }
+        out_capacity = internal_buffer.size();
+        out_data = reinterpret_cast<uint8_t *>(internal_buffer.begin());

-    out_capacity = internal_buffer.size();
-    out_data = reinterpret_cast<uint8_t *>(internal_buffer.begin());
+        brotli->result = BrotliDecoderDecompressStream(brotli->state, &in_available, &in_data, &out_capacity, &out_data, nullptr);

-    brotli->result = BrotliDecoderDecompressStream(brotli->state, &in_available, &in_data, &out_capacity, &out_data, nullptr);
+        in->position() = in->buffer().end() - in_available;
+    }
+    while (brotli->result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT && out_capacity == internal_buffer.size());

-    in->position() = in->buffer().end() - in_available;
+    if (brotli->result == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT && (!in_available || in->eof()))
+    {
+        throw Exception("brotli decode error", ErrorCodes::BROTLI_READ_FAILED);
+    }
+
    working_buffer.resize(internal_buffer.size() - out_capacity);

    if (brotli->result == BROTLI_DECODER_RESULT_SUCCESS)
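The bug this hunk fixes (exercised by the `--max_read_buffer_size=8` test added later in this commit) is the classic streaming-decoder pitfall: with a tiny read buffer, a single decode call may consume input yet produce no output, so a single-pass `nextImpl` gives up too early. A standalone toy sketch of the loop shape (not the Brotli API):

#include <cstdio>
#include <string>
#include <string_view>

// Toy "decoder": consumes at most `chunk` bytes per call and echoes them,
// standing in for a streaming decompressor fed from a small read buffer.
static std::string decodeSome(std::string_view & in, size_t chunk)
{
    size_t n = in.size() < chunk ? in.size() : chunk;
    std::string out(in.substr(0, n));
    in.remove_prefix(n);
    return out;
}

int main()
{
    std::string_view input = "abcdef";
    std::string result;
    do
    {
        result += decodeSome(input, 2); // one call may make little progress
    } while (!input.empty());           // so loop, as the do/while above does
    std::printf("%s\n", result.c_str()); // abcdef
}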
@@ -141,6 +141,18 @@ namespace

 /// Settings. Different for each explain type.

+struct QueryASTSettings
+{
+    bool graph = false;
+
+    constexpr static char name[] = "AST";
+
+    std::unordered_map<std::string, std::reference_wrapper<bool>> boolean_settings =
+    {
+        {"graph", graph},
+    };
+};
+
 struct QueryPlanSettings
 {
    QueryPlan::ExplainPlanOptions query_plan_options;
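The settings struct above uses a small registry trick: each setting is a plain bool member, and a map of `reference_wrapper`s lets a generic parser flip members by name (so `EXPLAIN AST graph = 1` becomes a lookup of "graph"). A compilable standalone sketch of the same pattern:

#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>

// Sketch of the registry pattern: boolean settings addressable by name.
struct AstSettingsSketch
{
    bool graph = false;

    std::unordered_map<std::string, std::reference_wrapper<bool>> boolean_settings =
    {
        {"graph", graph},
    };
};

int main()
{
    AstSettingsSketch settings;
    settings.boolean_settings.at("graph").get() = true; // generic by-name update
    std::printf("%d\n", settings.graph);                // 1
}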
@@ -260,10 +272,11 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
    {
        case ASTExplainQuery::ParsedAST:
        {
-            if (ast.getSettings())
-                throw Exception("Settings are not supported for EXPLAIN AST query.", ErrorCodes::UNKNOWN_SETTING);
-
-            dumpAST(*ast.getExplainedQuery(), buf);
+            auto settings = checkAndGetSettings<QueryASTSettings>(ast.getSettings());
+            if (settings.graph)
+                dumpASTInDotFormat(*ast.getExplainedQuery(), buf);
+            else
+                dumpAST(*ast.getExplainedQuery(), buf);
            break;
        }
        case ASTExplainQuery::AnalyzedSyntax:
@@ -98,6 +98,7 @@ NamesAndTypesList QueryLogElement::getNamesAndTypes()
        {"http_referer", std::make_shared<DataTypeString>()},
        {"forwarded_for", std::make_shared<DataTypeString>()},
        {"quota_key", std::make_shared<DataTypeString>()},
+        {"distributed_depth", std::make_shared<DataTypeUInt64>()},

        {"revision", std::make_shared<DataTypeUInt32>()},

@@ -289,5 +290,6 @@ void QueryLogElement::appendClientInfo(const ClientInfo & client_info, MutableCo
    columns[i++]->insert(client_info.forwarded_for);

    columns[i++]->insert(client_info.quota_key);
+    columns[i++]->insert(client_info.distributed_depth);
 }
 }
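Note the invariant these paired hunks maintain: the column list in `getNamesAndTypes()` and the positional `columns[i++]->insert(...)` calls in the fill routine must stay in the same order, so adding `distributed_depth` touches both places. A standalone toy sketch of that invariant (names are illustrative):

#include <cstdio>
#include <string>
#include <vector>

// Declared schema: order matters.
static std::vector<std::string> namesAndTypes()
{
    return {"quota_key", "distributed_depth"};
}

// Fill routine: must append values in exactly the declared order.
static std::vector<std::string> fillRow()
{
    std::vector<std::string> row;
    row.push_back("default_key"); // quota_key
    row.push_back("0");           // distributed_depth
    return row;
}

int main()
{
    // Positions must match one-to-one, or columns silently shift.
    std::printf("%d\n", namesAndTypes().size() == fillRow().size()); // 1
}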
@@ -68,6 +68,7 @@ NamesAndTypesList QueryThreadLogElement::getNamesAndTypes()
        {"http_referer", std::make_shared<DataTypeString>()},
        {"forwarded_for", std::make_shared<DataTypeString>()},
        {"quota_key", std::make_shared<DataTypeString>()},
+        {"distributed_depth", std::make_shared<DataTypeUInt64>()},

        {"revision", std::make_shared<DataTypeUInt32>()},

@@ -86,6 +86,75 @@ inline void dumpAST(const IAST & ast, WriteBuffer & ostr, DumpASTNode * parent =
        dumpAST(*child, ostr, &dump);
 }

+class DumpASTNodeInDotFormat
+{
+public:
+    DumpASTNodeInDotFormat(const IAST & ast_, WriteBuffer * ostr_, bool root_ = true, const char * label_ = nullptr)
+        : ast(ast_), ostr(ostr_), root(root_), label(label_)
+    {
+        if (!ostr)
+            return;
+
+        if (root)
+            (*ostr) << "digraph " << (label ? String(label) : "") << "{\n rankdir=\"UD\";\n";
+
+        printNode();
+    }
+
+    ~DumpASTNodeInDotFormat()
+    {
+        if (!ostr)
+            return;
+
+        for (const auto & child : ast.children)
+            printEdge(ast, *child);
+
+        if (root)
+            (*ostr) << "}\n";
+    }
+
+private:
+    const IAST & ast;
+    WriteBuffer * ostr;
+    bool root;
+    const char * label;
+
+    String getASTId() const { return ast.getID(' '); }
+    static String getNodeId(const IAST & a) { return "n" + std::to_string(reinterpret_cast<std::uintptr_t>(&a)); }
+
+    void printNode() const
+    {
+        (*ostr) << " " << getNodeId(ast) << "[label=\"";
+        (*ostr) << getASTId();
+
+        String alias = ast.tryGetAlias();
+        if (!alias.empty())
+            (*ostr) << " ("
+                    << "alias"
+                    << " " << alias << ")";
+
+        if (!ast.children.empty())
+            (*ostr) << " (children"
+                    << " " << ast.children.size() << ")";
+        (*ostr) << "\"];\n";
+    }
+
+    void printEdge(const IAST & parent, const IAST & child) const
+    {
+        (*ostr) << " " << getNodeId(parent) << " -> " << getNodeId(child) << ";\n";
+    }
+};
+
+
+/// Print AST in "dot" format for GraphViz
+/// You can render it with: dot -Tpng ast.dot ast.png
+inline void dumpASTInDotFormat(const IAST & ast, WriteBuffer & ostr, bool root = true)
+{
+    DumpASTNodeInDotFormat dump(ast, &ostr, root);
+    for (const auto & child : ast.children)
+        dumpASTInDotFormat(*child, ostr, false);
+}
+
+
 /// String stream dumped in dtor
 template <bool _enable>
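The class above leans on RAII: the constructor prints a node (and the `digraph` header at the root), the destructor prints that node's outgoing edges and the closing brace, so an ordinary recursive walk emits well-formed GraphViz output. A standalone toy version of the same idea for a generic tree (not tied to IAST or WriteBuffer):

#include <cstdio>
#include <memory>
#include <vector>

struct Node
{
    int id;
    std::vector<std::unique_ptr<Node>> children;
};

// Emits "dot" text for the subtree rooted at `node`; addresses are not
// needed here because toy nodes carry their own ids.
static void dumpDot(const Node & node, bool root = true)
{
    if (root)
        std::printf("digraph {\n");
    std::printf("  n%d;\n", node.id);
    for (const auto & child : node.children)
    {
        std::printf("  n%d -> n%d;\n", node.id, child->id);
        dumpDot(*child, false);
    }
    if (root)
        std::printf("}\n");
}

int main()
{
    Node rootNode{1, {}};
    rootNode.children.push_back(std::make_unique<Node>(Node{2, {}}));
    dumpDot(rootNode); // pipe into: dot -Tpng -o ast.png
}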
@@ -281,13 +281,13 @@ StorageMySQLConfiguration StorageMySQL::getConfiguration(ASTs engine_args, Conte
        configuration.table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
        configuration.username = engine_args[3]->as<ASTLiteral &>().value.safeGet<String>();
        configuration.password = engine_args[4]->as<ASTLiteral &>().value.safeGet<String>();
-
        if (engine_args.size() >= 6)
            configuration.replace_query = engine_args[5]->as<ASTLiteral &>().value.safeGet<UInt64>();
        if (engine_args.size() == 7)
            configuration.on_duplicate_clause = engine_args[6]->as<ASTLiteral &>().value.safeGet<String>();
    }
-
+    for (const auto & address : configuration.addresses)
+        context_->getRemoteHostFilter().checkHostAndPort(address.first, toString(address.second));
    if (configuration.replace_query && !configuration.on_duplicate_clause.empty())
        throw Exception(ErrorCodes::BAD_ARGUMENTS,
            "Only one of 'replace_query' and 'on_duplicate_clause' can be specified, or none of them");
@@ -425,7 +425,6 @@ StoragePostgreSQLConfiguration StoragePostgreSQL::getConfiguration(ASTs engine_a
        configuration.host = configuration.addresses[0].first;
        configuration.port = configuration.addresses[0].second;
    }
-
    configuration.database = engine_args[1]->as<ASTLiteral &>().value.safeGet<String>();
    configuration.table = engine_args[2]->as<ASTLiteral &>().value.safeGet<String>();
    configuration.username = engine_args[3]->as<ASTLiteral &>().value.safeGet<String>();
@@ -436,6 +435,8 @@ StoragePostgreSQLConfiguration StoragePostgreSQL::getConfiguration(ASTs engine_a
        if (engine_args.size() >= 7)
            configuration.on_conflict = engine_args[6]->as<ASTLiteral &>().value.safeGet<String>();
    }
+    for (const auto & address : configuration.addresses)
+        context->getRemoteHostFilter().checkHostAndPort(address.first, toString(address.second));

    return configuration;
}
@@ -48,6 +48,7 @@ NamesAndTypesList StorageSystemProcesses::getNamesAndTypes()
        {"forwarded_for", std::make_shared<DataTypeString>()},

        {"quota_key", std::make_shared<DataTypeString>()},
+        {"distributed_depth", std::make_shared<DataTypeUInt64>()},

        {"elapsed", std::make_shared<DataTypeFloat64>()},
        {"is_cancelled", std::make_shared<DataTypeUInt8>()},
@@ -115,6 +116,7 @@ void StorageSystemProcesses::fillData(MutableColumns & res_columns, ContextPtr c
            res_columns[i++]->insert(process.client_info.forwarded_for);

            res_columns[i++]->insert(process.client_info.quota_key);
+            res_columns[i++]->insert(process.client_info.distributed_depth);

            res_columns[i++]->insert(process.elapsed_seconds);
            res_columns[i++]->insert(process.is_cancelled);
@@ -1,5 +1,9 @@
+-- Tags: long
+
 SET max_bytes_before_external_group_by = 100000000;
-SET max_memory_usage = 351000000;
+SET max_memory_usage = 410000000;
+SET group_by_two_level_threshold = 100000;
+SET group_by_two_level_threshold_bytes = 50000000;

 SELECT sum(k), sum(c) FROM (SELECT number AS k, count() AS c FROM (SELECT * FROM system.numbers LIMIT 10000000) GROUP BY k);
 SELECT sum(k), sum(c), max(u) FROM (SELECT number AS k, count() AS c, uniqArray(range(number % 16)) AS u FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k);
@@ -1,2 +1,3 @@
 test Hello, world!
 Hello, world!
+1
@@ -1,2 +1,3 @@
 SELECT * FROM system.macros WHERE macro = 'test';
 SELECT getMacro('test');
+select isConstant(getMacro('test'));
@@ -13,6 +13,8 @@
 1
 1
 1
+1
+1
 === Code 210 with ipv6
 1
 1
@@ -23,6 +25,12 @@
 === Values form config
 1
 1
+1
+1
+=== Values form config 2
+1
+1
+1
 ===
 1
 1
@@ -32,6 +32,10 @@ error="$(${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --port "${not_alive_po
echo "${error}" | grep -Fc "Code: 210"
echo "${error}" | grep -Fc "${CLICKHOUSE_HOST}:${not_alive_port}"

+error="$(${CLICKHOUSE_CLIENT} --host "${not_alive_host}" --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "${not_alive_host}:9000"
+
${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --port "${not_alive_port}" --host "${CLICKHOUSE_HOST}" --query "SELECT 1";
${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --port "${CLICKHOUSE_PORT_TCP}" --port "${not_alive_port}" --query "SELECT 1";

@@ -54,7 +58,7 @@ echo "${error}" | grep -Fc "${ipv6_host_with_brackets}:${not_alive_port}"

echo '=== Values form config'

-CUSTOM_CONFIG="$CURDIR/02100_config.xml"
+CUSTOM_CONFIG="$CURDIR/02100_config_$(${CLICKHOUSE_LOCAL} -q 'SELECT rand()').xml"
rm -f ${CUSTOM_CONFIG}

cat << EOF > ${CUSTOM_CONFIG}
@@ -67,6 +71,27 @@ EOF
error="$(${CLICKHOUSE_CLIENT} --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
echo "${error}" | grep -Fc "DB::NetException"
echo "${error}" | grep -Fc "${not_alive_host}:${not_alive_port}"
+
+error="$(${CLICKHOUSE_CLIENT} --host localhost --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "localhost:${not_alive_port}"
+
+rm -f ${CUSTOM_CONFIG}
+
+echo '=== Values form config 2'
+
+cat << EOF > ${CUSTOM_CONFIG}
+<config>
+    <host>${not_alive_host}</host>
+</config>
+EOF
+
+error="$(${CLICKHOUSE_CLIENT} --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "${not_alive_host}:9000"
+
+${CLICKHOUSE_CLIENT} --host "${CLICKHOUSE_HOST}" --config ${CUSTOM_CONFIG} --query "SELECT 1"
+

rm -f ${CUSTOM_CONFIG}

echo '==='

@@ -0,0 +1,10 @@
+1
+1
+=== Values form config
+1
+1
+1
+1
+=== Values form config 2
+1
+1
tests/queries/0_stateless/02100_multiple_hosts_command_line_set_ssl.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+# Tags: use-ssl
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+not_alive_host="10.100.0.0"
+not_alive_port="1"
+
+error="$(${CLICKHOUSE_CLIENT} --secure --host "${not_alive_host}" --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "${not_alive_host}:9440"
+
+echo '=== Values form config'
+
+CUSTOM_CONFIG="$CURDIR/02100_config_$(${CLICKHOUSE_LOCAL} -q 'SELECT rand()').xml"
+rm -f ${CUSTOM_CONFIG}
+
+cat << EOF > ${CUSTOM_CONFIG}
+<config>
+    <host>${not_alive_host}</host>
+    <port>${not_alive_port}</port>
+</config>
+EOF
+
+error="$(${CLICKHOUSE_CLIENT} --secure --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "${not_alive_host}:${not_alive_port}"
+
+error="$(${CLICKHOUSE_CLIENT} --secure --host localhost --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "localhost:${not_alive_port}"
+
+rm -f ${CUSTOM_CONFIG}
+
+echo '=== Values form config 2'
+
+cat << EOF > ${CUSTOM_CONFIG}
+<config>
+    <host>${not_alive_host}</host>
+</config>
+EOF
+
+error="$(${CLICKHOUSE_CLIENT} --secure --config ${CUSTOM_CONFIG} --query "SELECT 1" 2>&1 > /dev/null)"
+echo "${error}" | grep -Fc "DB::NetException"
+echo "${error}" | grep -Fc "${not_alive_host}:9440"
+
+rm -f ${CUSTOM_CONFIG}
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
+OK
tests/queries/0_stateless/02234_clickhouse_local_test_mode.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+
+$CLICKHOUSE_LOCAL --query="SELECT n" 2>&1 | grep -q "Code: 47. DB::Exception: Missing columns:" && echo 'OK' || echo 'FAIL' ||:
+$CLICKHOUSE_LOCAL --testmode --query="SELECT n -- { serverError 47 }"
+
tests/queries/0_stateless/02235_brotli_bug.reference (new file, 1 line)
@@ -0,0 +1 @@
+1000000 999999
tests/queries/0_stateless/02235_brotli_bug.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# Tags: no-fasttest
+# Tag no-fasttest: depends on brotli and bzip2
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS file"
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE file (x UInt64) ENGINE = File(TSV, '${CLICKHOUSE_DATABASE}/data.tsv.br')"
+${CLICKHOUSE_CLIENT} --query "TRUNCATE TABLE file"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO file SELECT * FROM numbers(1000000)"
+${CLICKHOUSE_CLIENT} --max_read_buffer_size=8 --query "SELECT count(), max(x) FROM file"
+${CLICKHOUSE_CLIENT} --query "DROP TABLE file"
@@ -75,7 +75,7 @@ xfails = {
    RQ_SRS008_AES_Functions_DifferentModes("1.0")
)
@XFails(xfails)
-def regression(self, local, clickhouse_binary_path, stress=None):
+def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse AES encryption functions regression module.
    """
    nodes = {
@@ -84,6 +84,7 @@ def regression(self, local, clickhouse_binary_path, stress=None):

    if stress is not None:
        self.context.stress = stress
+    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "aes_encryption_env")) as cluster:
@@ -61,7 +61,7 @@ xfails = {
    RQ_SRS_010_DateTime64_ExtendedRange("1.0"),
)
@XFails(xfails)
-def regression(self, local, clickhouse_binary_path, stress=False):
+def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=False):
    """ClickHouse DateTime64 Extended Range regression module.
    """
    nodes = {
@@ -70,6 +70,7 @@ def regression(self, local, clickhouse_binary_path, stress=False):

    if stress is not None:
        self.context.stress = stress
+    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "datetime64_extended_range_env")) as cluster:
@@ -0,0 +1,29 @@
+version: '2.3'
+
+services:
+  clickhouse:
+    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
+    privileged: true
+    expose:
+      - "9000"
+      - "9009"
+      - "8123"
+    volumes:
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
+      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
+      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
+    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
+    healthcheck:
+      test: clickhouse client --query='select 1'
+      interval: 10s
+      timeout: 10s
+      retries: 10
+      start_period: 300s
+    cap_add:
+      - SYS_PTRACE
+    security_opt:
+      - label:disable
tests/testflows/example/example_env_arm64/docker-compose.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
+version: '2.3'
+
+services:
+  zookeeper:
+    extends:
+      file: zookeeper-service.yml
+      service: zookeeper
+
+  clickhouse1:
+    extends:
+      file: clickhouse-service.yml
+      service: clickhouse
+    hostname: clickhouse1
+    volumes:
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
+      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
+      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
+    depends_on:
+      zookeeper:
+        condition: service_healthy
+
+  # dummy service which does nothing, but allows to postpone
+  # 'docker-compose up -d' till all dependecies will go healthy
+  all_services_ready:
+    image: hello-world
+    depends_on:
+      clickhouse1:
+        condition: service_healthy
+      zookeeper:
+        condition: service_healthy
@@ -0,0 +1,18 @@
+version: '2.3'
+
+services:
+  zookeeper:
+    image: zookeeper:3.6.2
+    expose:
+      - "2181"
+    environment:
+      ZOO_TICK_TIME: 500
+      ZOO_MY_ID: 1
+    healthcheck:
+      test: echo stat | nc localhost 2181
+      interval: 10s
+      timeout: 10s
+      retries: 3
+      start_period: 300s
+    security_opt:
+      - label:disable
@@ -7,22 +7,31 @@ append_path(sys.path, "..")

from helpers.cluster import Cluster
from helpers.argparser import argparser
+from platform import processor as current_cpu

@TestFeature
@Name("example")
@ArgumentParser(argparser)
-def regression(self, local, clickhouse_binary_path, stress=None):
+def regression(self, local, clickhouse_binary_path, clickhouse_version, stress=None):
    """Simple example of how you can use TestFlows to test ClickHouse.
    """
    nodes = {
        "clickhouse": ("clickhouse1",),
    }

+    self.context.clickhouse_version = clickhouse_version
+
    if stress is not None:
        self.context.stress = stress

+    folder_name = os.path.basename(current_dir())
+    if current_cpu() == 'aarch64':
+        env = f"{folder_name}_env_arm64"
+    else:
+        env = f"{folder_name}_env"
+
    with Cluster(local, clickhouse_binary_path, nodes=nodes,
-            docker_compose_project_dir=os.path.join(current_dir(), "example_env")) as cluster:
+            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("example.tests.example", "scenario"))

@@ -27,19 +27,21 @@ xflags = {
@Requirements(
    RQ_SRS_020_ClickHouse_Extended_Precision("1.0"),
)
-def regression(self, local, clickhouse_binary_path, stress=None):
+def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """Extended precision data type regression.
    """
    nodes = {
        "clickhouse":
            ("clickhouse1",)
    }
+    if stress is not None:
+        self.context.stress = stress
+    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "extended-precision-data-type_env")) as cluster:

        self.context.cluster = cluster
-        self.context.stress = stress

        Feature(run=load("extended_precision_data_types.tests.feature", "feature"))

@ -7,6 +7,10 @@ def argparser(parser):
        action="store_true",
        help="run regression in local mode", default=False)

    parser.add_argument("--clickhouse-version", type=str, dest="clickhouse_version",
        help="clickhouse server version", metavar="version",
        default=os.getenv("CLICKHOUSE_TESTS_SERVER_VERSION", None))

    parser.add_argument("--clickhouse-binary-path",
        type=str, dest="clickhouse_binary_path",
        help="path to ClickHouse binary, default: /usr/bin/clickhouse", metavar="path",

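A minimal sketch of how the new option behaves, assuming a bare argparse parser (the real argparser() receives its parser from the test framework):

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--clickhouse-version", type=str, dest="clickhouse_version",
    help="clickhouse server version", metavar="version",
    default=os.getenv("CLICKHOUSE_TESTS_SERVER_VERSION", None))

# an explicit flag wins over the CLICKHOUSE_TESTS_SERVER_VERSION environment variable
args = parser.parse_args(["--clickhouse-version", "21.12"])
assert args.clickhouse_version == "21.12"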
@ -1,9 +1,12 @@
import os
import uuid
import time
import inspect
import threading
import tempfile

from testflows._core.cli.arg.common import description

import testflows.settings as settings

from testflows.core import *
@ -12,6 +15,21 @@ from testflows.connect import Shell as ShellBase
from testflows.uexpect import ExpectTimeoutError
from testflows._core.testtype import TestSubType

MESSAGES_TO_RETRY = [
    "DB::Exception: ZooKeeper session has been expired",
    "DB::Exception: Connection loss",
    "Coordination::Exception: Session expired",
    "Coordination::Exception: Connection loss",
    "Coordination::Exception: Operation timeout",
    "DB::Exception: Operation timeout",
    "Operation timed out",
    "ConnectionPoolWithFailover: Connection failed at try",
    "DB::Exception: New table appeared in database being dropped or detached. Try again",
    "is already started to be removing by another replica right now",
    "Shutdown is called for table", # happens in SYSTEM SYNC REPLICA query if session with ZooKeeper is being reinitialized.
    "is executing longer than distributed_ddl_task_timeout" # distributed TTL timeout message
]

class Shell(ShellBase):
    def __exit__(self, type, value, traceback):
        # send exit and Ctrl-D repeatedly
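The MESSAGES_TO_RETRY list drives reruns of queries whose output contains a known transient error. A minimal sketch of that policy (run_query and the .output attribute are assumptions standing in for the real query machinery):

import time

def run_with_retries(run_query, sql, messages_to_retry=MESSAGES_TO_RETRY,
        retry_count=5, retry_delay=5):
    """Rerun sql while its output matches a known transient error message."""
    r = run_query(sql)
    while retry_count > 0 and any(msg in r.output for msg in messages_to_retry):
        time.sleep(retry_delay)
        retry_count -= 1
        r = run_query(sql)
    return r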
@ -56,33 +74,36 @@ class Node(object):
        shell = self.cluster._bash.pop(key)
        shell.__exit__(None, None, None)

    def restart(self, timeout=300, retries=5, safe=True):
    def wait_healthy(self, timeout=300):
        with By(f"waiting until container {self.name} is healthy"):
            for attempt in retries(timeout=timeout, delay=1):
                with attempt:
                    if self.command("echo 1", no_checks=1, steps=False).exitcode != 0:
                        fail("container is not healthy")

    def restart(self, timeout=300, retry_count=5, safe=True):
        """Restart node.
        """
        self.close_bashes()
        retry(self.cluster.command, retry_count)(
            None, f'{self.cluster.docker_compose} restart {self.name}',
            timeout=timeout, exitcode=0, steps=False)

        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} restart {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break

    def start(self, timeout=300, retries=5):
    def start(self, timeout=300, retry_count=5):
        """Start node.
        """
        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break
        retry(self.cluster.command, retry_count)(
            None, f'{self.cluster.docker_compose} start {self.name}',
            timeout=timeout, exitcode=0, steps=False)

    def stop(self, timeout=300, retries=5, safe=True):
    def stop(self, timeout=300, retry_count=5, safe=True):
        """Stop node.
        """
        self.close_bashes()

        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break
        retry(self.cluster.command, retry_count)(
            None, f'{self.cluster.docker_compose} stop {self.name}',
            timeout=timeout, exitcode=0, steps=False)

    def command(self, *args, **kwargs):
        return self.cluster.command(self.name, *args, **kwargs)
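The new start/stop/restart bodies lean on testflows' retry() helper instead of a hand-rolled loop. A rough sketch of the shape it is used in here (an assumption about its semantics, not testflows' actual implementation):

def retry_sketch(func, count):
    """Return a callable that retries func up to count times before giving up."""
    def wrapper(*args, **kwargs):
        last_exc = None
        for _ in range(count):
            try:
                return func(*args, **kwargs)
            except Exception as exc:  # the real helper is more selective
                last_exc = exc
        raise last_exc
    return wrapper

# usage mirrors the pattern above:
# retry_sketch(cluster.command, 5)(None, "docker-compose start node1", timeout=300)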
@ -96,7 +117,7 @@ class Node(object):
        """

        command = f"{cmd}"
        with Step("executing command", description=command, format_description=False) if steps else NullStep():
        with step("executing command", description=command, format_description=False) if steps else NullStep():
            try:
                r = self.cluster.bash(self.name, command=shell_command)(command, *args, **kwargs)
            except ExpectTimeoutError:
@ -114,124 +135,328 @@ class Node(object):
                with Then(f"output should contain message", description=message) if steps else NullStep():
                    assert message in r.output, error(r.output)

        if message is None or "Exception:" not in message:
            with Then("check if output has exception") if steps else NullStep():
                if "Exception:" in r.output:
                    if raise_on_exception:
                        raise QueryRuntimeException(r.output)
                    assert False, error(r.output)

        return r


class ClickHouseNode(Node):
    """Node with ClickHouse server.
    """
    def wait_healthy(self, timeout=300):
        with By(f"waiting until container {self.name} is healthy"):
            start_time = time.time()
            while True:
                if self.query("select 1", no_checks=1, timeout=300, steps=False).exitcode == 0:
                    break
                if time.time() - start_time < timeout:
                    time.sleep(2)
                    continue
                assert False, "container is not healthy"
    def thread_fuzzer(self):
        with Given("exporting THREAD_FUZZER"):
            self.command("export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000")
            self.command("export THREAD_FUZZER_SLEEP_PROBABILITY=0.1")
            self.command("export THREAD_FUZZER_SLEEP_TIME_US=100000")

    def stop(self, timeout=300, safe=True, retries=5):
            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1")
            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_MIGRATE_PROBABILITY=1")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_MIGRATE_PROBABILITY=1")

            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001")
            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001")
            self.command("export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000")
            self.command("export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000")
            self.command("export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000")

    def wait_clickhouse_healthy(self, timeout=300):
        with By(f"waiting until ClickHouse server on {self.name} is healthy"):
            for attempt in retries(timeout=timeout, delay=1):
                with attempt:
                    if self.query("SELECT version()", no_checks=1, steps=False).exitcode != 0:
                        fail("ClickHouse server is not healthy")
            node_version = self.query("SELECT version()", no_checks=1, steps=False).output
            if current().context.clickhouse_version is None:
                current().context.clickhouse_version = node_version
            else:
                assert current().context.clickhouse_version == node_version, error()

    def clickhouse_pid(self):
        """Return ClickHouse server pid if present
        otherwise return None.
        """
        if self.command("ls /tmp/clickhouse-server.pid").exitcode == 0:
            return self.command("cat /tmp/clickhouse-server.pid").output.strip()
        return None

    def stop_clickhouse(self, timeout=300, safe=True):
        """Stop ClickHouse server.
        """
        if safe:
            self.query("SYSTEM STOP MOVES")
            self.query("SYSTEM STOP MERGES")
            self.query("SYSTEM FLUSH LOGS")
            with By("waiting for 5 sec for moves and merges to stop"):
                time.sleep(5)
            with And("forcing to sync everything to disk"):
                self.command("sync", timeout=300, exitcode=0)

        with By(f"sending kill -TERM to ClickHouse server process on {self.name}"):
            pid = self.clickhouse_pid()
            self.command(f"kill -TERM {pid}", exitcode=0, steps=False)

        with And("checking pid does not exist"):
            for i, attempt in enumerate(retries(timeout=100, delay=3)):
                with attempt:
                    if i > 0 and i % 20 == 0:
                        self.command(f"kill -KILL {pid}", steps=False)
                    if self.command(f"ps {pid}", steps=False, no_checks=True).exitcode != 1:
                        fail("pid still alive")

        with And("deleting ClickHouse server pid file"):
            self.command("rm -rf /tmp/clickhouse-server.pid", exitcode=0, steps=False)

    def start_clickhouse(self, timeout=300, wait_healthy=True, retry_count=5, user=None, thread_fuzzer=False):
        """Start ClickHouse server.
        """
        pid = self.clickhouse_pid()
        if pid:
            raise RuntimeError(f"ClickHouse server already running with pid {pid}")

        if thread_fuzzer:
            self.thread_fuzzer()

        if user is None:
            with By("starting ClickHouse server process"):
                self.command(
                    "clickhouse server --config-file=/etc/clickhouse-server/config.xml"
                    " --log-file=/var/log/clickhouse-server/clickhouse-server.log"
                    " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
                    " --pidfile=/tmp/clickhouse-server.pid --daemon",
                    exitcode=0, steps=False)
        else:
            with By(f"starting ClickHouse server process from {user}"):
                self.command(f'su {user} -c'
                    '"clickhouse server --config-file=/etc/clickhouse-server/config.xml'
                    ' --log-file=/var/log/clickhouse-server/clickhouse-server.log'
                    ' --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log'
                    ' --pidfile=/tmp/clickhouse-server.pid --daemon"',
                    exitcode=0, steps=False)

        with And("checking that ClickHouse server pid file was created"):
            for attempt in retries(timeout=timeout, delay=1):
                with attempt:
                    if self.command("ls /tmp/clickhouse-server.pid", steps=False, no_checks=True).exitcode != 0:
                        fail("no pid file yet")

        if wait_healthy:
            self.wait_clickhouse_healthy(timeout=timeout)

    def restart_clickhouse(self, timeout=300, safe=True, wait_healthy=True, retry_count=5, user=None):
        """Restart ClickHouse server.
        """
        if self.clickhouse_pid():
            self.stop_clickhouse(timeout=timeout, safe=safe)

        self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user)

    def stop(self, timeout=300, safe=True, retry_count=5):
        """Stop node.
        """
        if safe:
            self.query("SYSTEM STOP MOVES")
            self.query("SYSTEM STOP MERGES")
            self.query("SYSTEM FLUSH LOGS")
            with By("waiting for 5 sec for moves and merges to stop"):
                time.sleep(5)
            with And("forcing to sync everything to disk"):
                self.command("sync", timeout=300)
        if self.clickhouse_pid():
            self.stop_clickhouse(timeout=timeout, safe=safe)

        self.close_bashes()
        return super(ClickHouseNode, self).stop(timeout=timeout, retry_count=retry_count)

        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} stop {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break

    def start(self, timeout=300, wait_healthy=True, retries=5):
    def start(self, timeout=300, start_clickhouse=True, wait_healthy=True, retry_count=5, user=None):
        """Start node.
        """
        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} start {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break
        super(ClickHouseNode, self).start(timeout=timeout, retry_count=retry_count)

        if wait_healthy:
            self.wait_healthy(timeout)
        if start_clickhouse:
            self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user,)

    def restart(self, timeout=300, safe=True, wait_healthy=True, retries=5):
    def restart(self, timeout=300, safe=True, start_clickhouse=True,
            wait_healthy=True, retry_count=5, user=None):
        """Restart node.
        """
        if safe:
            self.query("SYSTEM STOP MOVES")
            self.query("SYSTEM STOP MERGES")
            self.query("SYSTEM FLUSH LOGS")
            with By("waiting for 5 sec for moves and merges to stop"):
                time.sleep(5)
            with And("forcing to sync everything to disk"):
                self.command("sync", timeout=300)
        if self.clickhouse_pid():
            self.stop_clickhouse(timeout=timeout, safe=safe)

        self.close_bashes()
        super(ClickHouseNode, self).restart(timeout=timeout, retry_count=retry_count)

        for retry in range(retries):
            r = self.cluster.command(None, f'{self.cluster.docker_compose} restart {self.name}', timeout=timeout)
            if r.exitcode == 0:
                break
        if start_clickhouse:
            self.start_clickhouse(timeout=timeout, wait_healthy=wait_healthy, user=user)

        if wait_healthy:
            self.wait_healthy(timeout)
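The "safe" paths in stop() and restart() share one shutdown order. A condensed sketch of that sequence (safe_stop is a name introduced here for illustration):

SAFE_STOP_QUERIES = [
    "SYSTEM STOP MOVES",   # stop background part moves
    "SYSTEM STOP MERGES",  # stop background merges
    "SYSTEM FLUSH LOGS",   # flush system log tables
]

def safe_stop(node):
    for q in SAFE_STOP_QUERIES:
        node.query(q)
    node.command("sync", timeout=300)  # push dirty pages to disk before stopping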
    def hash_query(self, sql, hash_utility="sha1sum", steps=True, step=By,
            settings=None, secure=False, *args, **kwargs):
        """Execute sql query inside the container and return the hash of the output.

    def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False,
            raise_on_exception=False, step=By, settings=None, *args, **kwargs):
        """Execute and check query.
        :param sql: sql query
        :param message: expected message that should be in the output, default: None
        :param exitcode: expected exitcode, default: None
        :param hash_utility: hash function which is used to compute the hash
        """
        settings = list(settings or [])
        query_settings = list(settings)

        if hasattr(current().context, "default_query_settings"):
            settings += current().context.default_query_settings
            query_settings += current().context.default_query_settings

        client = "clickhouse client -n"
        if secure:
            client += " -s"

        if len(sql) > 1024:
            with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
                query.write(sql)
                query.flush()
                command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} clickhouse client -n"
                for setting in settings:
                command = f"set -o pipefail && cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client} | {hash_utility}"
                for setting in query_settings:
                    name, value = setting
                    command += f" --{name} \"{value}\""
                description = f"""
                    echo -e \"{sql[:100]}...\" > {query.name}
                    {command}
                """
                with step("executing command", description=description,
                        format_description=False) if steps else NullStep():
                    try:
                        r = self.cluster.bash(None)(command, *args, **kwargs)
                    except ExpectTimeoutError:
                        self.cluster.close_bash(None)
        else:
            command = f"set -o pipefail && echo -e \"{sql}\" | {client} | {hash_utility}"
            for setting in query_settings:
                name, value = setting
                command += f" --{name} \"{value}\""
            with step("executing command", description=command,
                    format_description=False) if steps else NullStep():
                try:
                    r = self.cluster.bash(self.name)(command, *args, **kwargs)
                except ExpectTimeoutError:
                    self.cluster.close_bash(self.name)

        with Then(f"exitcode should be 0") if steps else NullStep():
            assert r.exitcode == 0, error(r.output)

        return r.output

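Hypothetical usage of hash_query (node names and table are assumptions): comparing result sets across replicas by hash instead of shipping full outputs:

lhs = cluster.node("clickhouse1").hash_query("SELECT * FROM t ORDER BY id")
rhs = cluster.node("clickhouse2").hash_query("SELECT * FROM t ORDER BY id")
assert lhs == rhs, "replicas diverged"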
    def diff_query(self, sql, expected_output, steps=True, step=By,
            settings=None, secure=False, *args, **kwargs):
        """Execute the query inside the container from the host and compare its
        output to a file located on the host.

        For example:
            diff <(echo "SELECT * FROM myints FORMAT CSVWithNames" | clickhouse-client -mn) select.out

        :param sql: sql query
        :param expected_output: path to the expected output
        """
        settings = list(settings or [])
        query_settings = list(settings)

        if hasattr(current().context, "default_query_settings"):
            query_settings += current().context.default_query_settings

        client = "clickhouse client -n"
        if secure:
            client += " -s"

        if len(sql) > 1024:
            with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
                query.write(sql)
                query.flush()
                command = f"diff <(cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}"
                for setting in query_settings:
                    name, value = setting
                    command += f" --{name} \"{value}\""
                description = f"""
                    echo -e \"{sql[:100]}...\" > {query.name}
                    {command}
                """
                with Step("executing command", description=description, format_description=False) if steps else NullStep():
                with step("executing command", description=description, format_description=False) if steps else NullStep():
                    try:
                        r = self.cluster.bash(None)(command, *args, **kwargs)
                    except ExpectTimeoutError:
                        self.cluster.close_bash(None)
        else:
            command = f"echo -e \"{sql}\" | clickhouse client -n"
            for setting in settings:
            command = f"diff <(echo -e \"{sql}\" | {self.cluster.docker_compose} exec -T {self.name} {client}) {expected_output}"
            for setting in query_settings:
                name, value = setting
                command += f" --{name} \"{value}\""
            with Step("executing command", description=command, format_description=False) if steps else NullStep():
            with step("executing command", description=command,
                    format_description=False) if steps else NullStep():
                try:
                    r = self.cluster.bash(None)(command, *args, **kwargs)
                except ExpectTimeoutError:
                    self.cluster.close_bash(None)

        with Then(f"exitcode should be 0") if steps else NullStep():
            assert r.exitcode == 0, error(r.output)

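Hypothetical usage of diff_query (table and file names are assumptions); the exit-code assertion lives inside the method, so the call either passes or fails the step:

node.diff_query("SELECT * FROM myints FORMAT CSVWithNames",
    expected_output="/path/on/host/select.out")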
    def query(self, sql, message=None, exitcode=None, steps=True, no_checks=False,
            raise_on_exception=False, step=By, settings=None,
            retry_count=5, messages_to_retry=None, retry_delay=5, secure=False,
            *args, **kwargs):
        """Execute and check query.
        :param sql: sql query
        :param message: expected message that should be in the output, default: None
        :param exitcode: expected exitcode, default: None
        :param steps: wrap query execution in a step, default: True
        :param no_checks: disable exitcode and message checks, default: False
        :param step: wrapping step class, default: By
        :param settings: list of settings to be used for the query in the form [(name, value),...], default: None
        :param retry_count: number of retries, default: 5
        :param messages_to_retry: list of messages in the query output for
               which retry should be triggered, default: MESSAGES_TO_RETRY
        :param retry_delay: number of seconds to sleep before retry, default: 5
        :param secure: use secure connection, default: False
        """
        retry_count = max(0, int(retry_count))
        retry_delay = max(0, float(retry_delay))
        settings = list(settings or [])
        query_settings = list(settings)

        if messages_to_retry is None:
            messages_to_retry = MESSAGES_TO_RETRY

        if hasattr(current().context, "default_query_settings"):
            query_settings += current().context.default_query_settings

        client = "clickhouse client -n"
        if secure:
            client += " -s"

        if len(sql) > 1024:
            with tempfile.NamedTemporaryFile("w", encoding="utf-8") as query:
                query.write(sql)
                query.flush()
                command = f"cat \"{query.name}\" | {self.cluster.docker_compose} exec -T {self.name} {client}"
                for setting in query_settings:
                    name, value = setting
                    command += f" --{name} \"{value}\""
                description = f"""
                    echo -e \"{sql[:100]}...\" > {query.name}
                    {command}
                """
                with step("executing command", description=description, format_description=False) if steps else NullStep():
                    try:
                        r = self.cluster.bash(None)(command, *args, **kwargs)
                    except ExpectTimeoutError:
                        self.cluster.close_bash(None)
                        raise
        else:
            command = f"echo -e \"{sql}\" | {client}"
            for setting in query_settings:
                name, value = setting
                command += f" --{name} \"{value}\""
            with step("executing command", description=command, format_description=False) if steps else NullStep():
                try:
                    r = self.cluster.bash(self.name)(command, *args, **kwargs)
                except ExpectTimeoutError:
                    self.cluster.close_bash(self.name)
                    raise

        if retry_count and retry_count > 0:
            if any(msg in r.output for msg in messages_to_retry):
                time.sleep(retry_delay)
                return self.query(sql=sql, message=message, exitcode=exitcode,
                    steps=steps, no_checks=no_checks,
                    raise_on_exception=raise_on_exception, step=step, settings=settings,
                    retry_count=retry_count-1, messages_to_retry=messages_to_retry,
                    *args, **kwargs)

        if no_checks:
            return r

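Hypothetical usage of the extended query() (values are assumptions): check an expected message, and lower the retry budget for a statement where a blind rerun would be unsafe:

node.query("SELECT 1 FORMAT TabSeparated", message="1")
node.query("INSERT INTO t VALUES (1)", retry_count=0)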
@ -252,19 +477,24 @@ class ClickHouseNode(Node):

        return r


class Cluster(object):
    """Simple object around docker-compose cluster.
    """
    def __init__(self, local=False,
            clickhouse_binary_path=None, configs_dir=None,
            clickhouse_binary_path=None,
            clickhouse_odbc_bridge_binary_path=None,
            configs_dir=None,
            nodes=None,
            docker_compose="docker-compose", docker_compose_project_dir=None,
            docker_compose_file="docker-compose.yml"):
            docker_compose_file="docker-compose.yml",
            environ=None):

        self._bash = {}
        self._control_shell = None
        self.environ = {}
        self.environ = {} if (environ is None) else environ
        self.clickhouse_binary_path = clickhouse_binary_path
        self.clickhouse_odbc_bridge_binary_path = clickhouse_odbc_bridge_binary_path
        self.configs_dir = configs_dir
        self.local = local
        self.nodes = nodes or {}
@ -282,20 +512,54 @@ class Cluster(object):
        if not os.path.exists(self.configs_dir):
            raise TypeError(f"configs directory '{self.configs_dir}' does not exist")

        # auto set docker-compose project directory
        if docker_compose_project_dir is None:
            caller_project_dir = os.path.join(caller_dir, "docker-compose")
            if os.path.exists(caller_project_dir):
                docker_compose_project_dir = caller_project_dir
            raise TypeError("docker compose directory must be specified.")

        docker_compose_file_path = os.path.join(docker_compose_project_dir or "", docker_compose_file)

        if not os.path.exists(docker_compose_file_path):
            raise TypeError("docker compose file '{docker_compose_file_path}' does not exist")
            raise TypeError(f"docker compose file '{docker_compose_file_path}' does not exist")

        if self.clickhouse_binary_path and self.clickhouse_binary_path.startswith("docker://"):
            if current().context.clickhouse_version is None:
                try:
                    current().context.clickhouse_version = self.clickhouse_binary_path.split(":")[2]
                    debug(f"auto setting clickhouse version to {current().context.clickhouse_version}")
                except IndexError:
                    current().context.clickhouse_version = None
            self.clickhouse_binary_path, self.clickhouse_odbc_bridge_binary_path = self.get_clickhouse_binary_from_docker_container(
                self.clickhouse_binary_path)

        self.docker_compose += f" --ansi never --project-directory \"{docker_compose_project_dir}\" --file \"{docker_compose_file_path}\""
        self.lock = threading.Lock()

    def get_clickhouse_binary_from_docker_container(self, docker_image,
            container_clickhouse_binary_path="/usr/bin/clickhouse",
            container_clickhouse_odbc_bridge_binary_path="/usr/bin/clickhouse-odbc-bridge",
            host_clickhouse_binary_path=None,
            host_clickhouse_odbc_bridge_binary_path=None):
        """Get clickhouse-server and clickhouse-odbc-bridge binaries
        from some Docker container.
        """
        docker_image = docker_image.split("docker://", 1)[-1]
        docker_container_name = str(uuid.uuid1())

        if host_clickhouse_binary_path is None:
            host_clickhouse_binary_path = os.path.join(tempfile.gettempdir(), f"{docker_image.rsplit('/',1)[-1].replace(':','_')}")

        if host_clickhouse_odbc_bridge_binary_path is None:
            host_clickhouse_odbc_bridge_binary_path = host_clickhouse_binary_path + "_odbc_bridge"

        with Given("I get ClickHouse server binary from docker container", description=f"{docker_image}"):
            with Shell() as bash:
                bash.timeout = 300
                bash(f"docker run -d --name \"{docker_container_name}\" {docker_image} | tee")
                bash(f"docker cp \"{docker_container_name}:{container_clickhouse_binary_path}\" \"{host_clickhouse_binary_path}\"")
                bash(f"docker cp \"{docker_container_name}:{container_clickhouse_odbc_bridge_binary_path}\" \"{host_clickhouse_odbc_bridge_binary_path}\"")
                bash(f"docker stop \"{docker_container_name}\"")

        return host_clickhouse_binary_path, host_clickhouse_odbc_bridge_binary_path

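With the docker:// scheme the binary is extracted from an image and the version is inferred from the tag. A hypothetical invocation (image tag and nodes are assumptions):

with Cluster(local=False,
        clickhouse_binary_path="docker://clickhouse/clickhouse-server:21.12",
        nodes={"clickhouse": ("clickhouse1",)},
        docker_compose_project_dir="example_env") as cluster:
    pass  # context.clickhouse_version is auto-set to "21.12" from the tag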
    @property
    def control_shell(self, timeout=300):
        """Must be called with self.lock.acquired.
@ -310,25 +574,42 @@ class Cluster(object):
                shell.timeout = 30
                shell("echo 1")
                break
            except:
            except IOError:
                raise
            except Exception as exc:
                shell.__exit__(None, None, None)
                if time.time() - time_start > timeout:
                    raise RuntimeError(f"failed to open control shell")
        self._control_shell = shell
        return self._control_shell

    def close_control_shell(self):
        """Must be called with self.lock.acquired.
        """
        if self._control_shell is None:
            return
        shell = self._control_shell
        self._control_shell = None
        shell.__exit__(None, None, None)

    def node_container_id(self, node, timeout=300):
        """Must be called with self.lock acquired.
        """
        container_id = None
        time_start = time.time()
        while True:
            c = self.control_shell(f"{self.docker_compose} ps -q {node}")
            container_id = c.output.strip()
            if c.exitcode == 0 and len(container_id) > 1:
                break
            if time.time() - time_start > timeout:
                raise RuntimeError(f"failed to get docker container id for the {node} service")
            try:
                c = self.control_shell(f"{self.docker_compose} ps -q {node}", timeout=timeout)
                container_id = c.output.strip()
                if c.exitcode == 0 and len(container_id) > 1:
                    break
            except IOError:
                raise
            except ExpectTimeoutError:
                self.close_control_shell()
                timeout = timeout - (time.time() - time_start)
                if timeout <= 0:
                    raise RuntimeError(f"failed to get docker container id for the {node} service")
        return container_id

    def shell(self, node, timeout=300):
@ -352,7 +633,9 @@ class Cluster(object):
                shell.timeout = 30
                shell("echo 1")
                break
            except:
            except IOError:
                raise
            except Exception as exc:
                shell.__exit__(None, None, None)
                if time.time() - time_start > timeout:
                    raise RuntimeError(f"failed to open bash to node {node}")
@ -387,7 +670,9 @@ class Cluster(object):
                self._bash[id].timeout = 30
                self._bash[id]("echo 1")
                break
            except:
            except IOError:
                raise
            except Exception as exc:
                self._bash[id].__exit__(None, None, None)
                if time.time() - time_start > timeout:
                    raise RuntimeError(f"failed to open bash to node {node}")
@ -459,13 +744,26 @@ class Cluster(object):
            else:
                self._bash[id] = shell
        finally:
            cmd = self.command(None, f"{self.docker_compose} down --timeout 60", bash=bash, timeout=timeout)
            cmd = self.command(None, f"{self.docker_compose} down -v --remove-orphans --timeout 60", bash=bash, timeout=timeout)
            with self.lock:
                if self._control_shell:
                    self._control_shell.__exit__(None, None, None)
                    self._control_shell = None
            return cmd

    def temp_path(self):
        """Return temporary folder path.
        """
        p = f"{self.environ['CLICKHOUSE_TESTS_DIR']}/_temp"
        if not os.path.exists(p):
            os.mkdir(p)
        return p

    def temp_file(self, name):
        """Return absolute temporary file path.
        """
        return f"{os.path.join(self.temp_path(), name)}"

    def up(self, timeout=30*60):
        if self.local:
            with Given("I am running in local mode"):
@ -477,7 +775,7 @@ class Cluster(object):
            with And("I set all the necessary environment variables"):
                self.environ["COMPOSE_HTTP_TIMEOUT"] = "300"
                self.environ["CLICKHOUSE_TESTS_SERVER_BIN_PATH"] = self.clickhouse_binary_path
                self.environ["CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH"] = os.path.join(
                self.environ["CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH"] = self.clickhouse_odbc_bridge_binary_path or os.path.join(
                    os.path.dirname(self.clickhouse_binary_path), "clickhouse-odbc-bridge")
                self.environ["CLICKHOUSE_TESTS_DIR"] = self.configs_dir

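A sketch of how those variables reach docker-compose (subprocess stands in for the Shell-based command runner the cluster actually uses, and the path is hypothetical):

import os
import subprocess

env = dict(os.environ)
env["COMPOSE_HTTP_TIMEOUT"] = "300"
env["CLICKHOUSE_TESTS_DIR"] = "/path/to/configs"  # hypothetical path
subprocess.run(["docker-compose", "up", "-d"], env=env, check=True)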
@ -525,11 +823,14 @@ class Cluster(object):
        if cmd.exitcode != 0 or "is unhealthy" in cmd.output or "Exit" in ps_cmd.output:
            fail("could not bring up docker-compose cluster")

        with Then("wait all nodes report healhy"):
        with Then("wait all nodes report healthy"):
            for name in self.nodes["clickhouse"]:
                self.node(name).wait_healthy()
                if name.startswith("clickhouse"):
                    self.node(name).start_clickhouse()

    def command(self, node, command, message=None, exitcode=None, steps=True, bash=None, *args, **kwargs):
    def command(self, node, command, message=None, exitcode=None, steps=True,
            bash=None, no_checks=False, use_error=True, *args, **kwargs):
        """Execute and check command.
        :param node: name of the service
        :param command: command
@ -545,10 +846,16 @@ class Cluster(object):
        except ExpectTimeoutError:
            self.close_bash(node)
            raise

        if no_checks:
            return r

        if exitcode is not None:
            with Then(f"exitcode should be {exitcode}", format_name=False) if steps else NullStep():
                assert r.exitcode == exitcode, error(r.output)

        if message is not None:
            with Then(f"output should contain message", description=message, format_description=False) if steps else NullStep():
                assert message in r.output, error(r.output)

        return r
@ -1,23 +1,76 @@
import os
import uuid
import time
import xml.etree.ElementTree as xmltree
import packaging.version as pkg_version
from collections import namedtuple

import testflows.settings as settings
from testflows.core import *
from testflows.asserts import error
from testflows.core.name import basename, parentname
from testflows._core.testtype import TestSubType

def check_clickhouse_version(version):
    """Compare ClickHouse version.
    """
    def check(test):
        if getattr(test.context, "clickhouse_version", None) is None:
            return False

        clickhouse_version = pkg_version.parse(str(test.context.clickhouse_version))

        if version.startswith("=="):
            return clickhouse_version == pkg_version.parse(str(version.split("==",1)[-1]))
        elif version.startswith(">="):
            return clickhouse_version >= pkg_version.parse(str(version.split(">=",1)[-1]))
        elif version.startswith("<="):
            return clickhouse_version <= pkg_version.parse(str(version.split("<=",1)[-1]))
        elif version.startswith("="):
            return clickhouse_version == pkg_version.parse(str(version.split("=",1)[-1]))
        elif version.startswith(">"):
            return clickhouse_version > pkg_version.parse(str(version.split(">",1)[-1]))
        elif version.startswith("<"):
            return clickhouse_version < pkg_version.parse(str(version.split("<",1)[-1]))
        else:
            return clickhouse_version == pkg_version.parse(str(version))

    return check

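check_clickhouse_version() returns a predicate over a test, so it can gate version-specific behaviour. Hypothetical usage (the skip() call and the version string are assumptions):

if check_clickhouse_version(">=21.8")(current()):
    pass  # exercise a feature introduced in 21.8
else:
    skip("feature requires ClickHouse >= 21.8")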
def getuid(with_test_name=False):
    if not with_test_name:
        return str(uuid.uuid1()).replace('-', '_')

    if current().subtype == TestSubType.Example:
        testname = f"{basename(parentname(current().name)).replace(' ', '_').replace(',', '')}"
    else:
        testname = f"{basename(current().name).replace(' ', '_').replace(',', '')}"

    return testname + "_" + str(uuid.uuid1()).replace('-', '_')


@TestStep(Given)
def instrument_clickhouse_server_log(self, node=None, test=None,
        clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log"):
        clickhouse_server_log="/var/log/clickhouse-server/clickhouse-server.log", always_dump=False):
    """Instrument clickhouse-server.log for the current test (default)
    by adding start and end messages that include test name to log
    of the specified node. If we are in the debug mode and the test
    fails then dump the messages from the log for this test.

    :param always_dump: always dump clickhouse log after test, default: `False`
    """
    if test is None:
        test = current()
    test = current()

    if node is None:
        node = self.context.node

    with By("getting current log size"):
        cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
        start_logsize = cmd.output.split(" ")[0].strip()
        cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
        if cmd.output == f"stat: cannot stat '{clickhouse_server_log}': No such file or directory":
            start_logsize = 0
        else:
            start_logsize = cmd.output.split(" ")[0].strip()

    try:
        with And("adding test name start message to the clickhouse-server.log"):
@ -29,14 +82,394 @@ def instrument_clickhouse_server_log(self, node=None, test=None,
        return

    with Finally("adding test name end message to the clickhouse-server.log", flags=TE):
        node.command(f"echo -e \"\\n-- end: {test.name} --\\n\" >> {clickhouse_server_log}")
        node.command(f"echo -e \"\\n-- end: {test.name} --\\n\" >> {clickhouse_server_log}")

        with And("getting current log size at the end of the test"):
            cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
            end_logsize = cmd.output.split(" ")[0].strip()
            cmd = node.command(f"stat --format=%s {clickhouse_server_log}")
            end_logsize = cmd.output.split(" ")[0].strip()

        with And("checking if test has failing result"):
            if settings.debug and not self.parent.result:
                with Then("dumping clickhouse-server.log for this test"):
                    node.command(f"tail -c +{start_logsize} {clickhouse_server_log}"
                        f" | head -c {int(end_logsize) - int(start_logsize)}")
            dump_log = always_dump or (settings.debug and not self.parent.result)

            if dump_log:
                with Then("dumping clickhouse-server.log for this test"):
                    node.command(f"tail -c +{start_logsize} {clickhouse_server_log}"
                        f" | head -c {int(end_logsize) - int(start_logsize)}")

xml_with_utf8 = '<?xml version="1.0" encoding="utf-8"?>\n'


def xml_indent(elem, level=0, by=" "):
    i = "\n" + level * by
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + by
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            xml_indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i


def xml_append(root, tag, text):
    element = xmltree.Element(tag)
    element.text = text
    root.append(element)
    return element


class Config:
    def __init__(self, content, path, name, uid, preprocessed_name):
        self.content = content
        self.path = path
        self.name = name
        self.uid = uid
        self.preprocessed_name = preprocessed_name


class KeyWithAttributes:
    def __init__(self, name, attributes):
        """XML key with attributes.

        :param name: key name
        :param attributes: dictionary of attributes {name: value, ...}
        """
        self.name = name
        self.attributes = dict(attributes)


def create_xml_config_content(entries, config_file, config_d_dir="/etc/clickhouse-server/config.d"):
    """Create XML configuration file from a dictionary.

    :param entries: dictionary that defines xml
    :param config_file: name of the config file
    :param config_d_dir: config.d directory path, default: `/etc/clickhouse-server/config.d`
    """
    uid = getuid()
    path = os.path.join(config_d_dir, config_file)
    name = config_file
    root = xmltree.Element("clickhouse")
    root.append(xmltree.Comment(text=f"config uid: {uid}"))

    def create_xml_tree(entries, root):
        for k, v in entries.items():
            if isinstance(k, KeyWithAttributes):
                xml_element = xmltree.Element(k.name)
                for attr_name, attr_value in k.attributes.items():
                    xml_element.set(attr_name, attr_value)
                if type(v) is dict:
                    create_xml_tree(v, xml_element)
                elif type(v) in (list, tuple):
                    for e in v:
                        create_xml_tree(e, xml_element)
                else:
                    xml_element.text = v
                root.append(xml_element)
            elif type(v) is dict:
                xml_element = xmltree.Element(k)
                create_xml_tree(v, xml_element)
                root.append(xml_element)
            elif type(v) in (list, tuple):
                xml_element = xmltree.Element(k)
                for e in v:
                    create_xml_tree(e, xml_element)
                root.append(xml_element)
            else:
                xml_append(root, k, v)

    create_xml_tree(entries, root)
    xml_indent(root)
    content = xml_with_utf8 + str(
        xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"),
        "utf-8")

    return Config(content, path, name, uid, "config.xml")

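Hypothetical usage of create_xml_config_content (entry values are assumptions); nested dictionaries become nested XML elements under the <clickhouse> root:

entries = {
    "logger": {"level": "trace"},
    KeyWithAttributes("remote_servers", {"incl": "clusters"}): {},
}
config = create_xml_config_content(entries, "custom_logger.xml")
# config.content now holds the rendered XML; config.path points into config.d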
def add_invalid_config(config, message, recover_config=None, tail=30, timeout=300, restart=True, user=None):
    """Check that ClickHouse errors when trying to load invalid configuration file.
    """
    cluster = current().context.cluster
    node = current().context.node

    try:
        with Given("I prepare the error log by writing empty lines into it"):
            node.command("echo -e \"%s\" > /var/log/clickhouse-server/clickhouse-server.err.log" % ("-\\n" * tail))

        with When("I add the config", description=config.path):
            command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
            node.command(command, steps=False, exitcode=0)

        with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
            started = time.time()
            command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"
            while time.time() - started < timeout:
                exitcode = node.command(command, steps=False).exitcode
                if exitcode == 0:
                    break
                time.sleep(1)
            assert exitcode == 0, error()

        if restart:
            with When("I restart ClickHouse to apply the config changes"):
                node.restart_clickhouse(safe=False, wait_healthy=False, user=user)

    finally:
        if recover_config is None:
            with Finally(f"I remove {config.name}"):
                with By("removing invalid configuration file"):
                    system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name,
                        "config.d", config.path.split("config.d/")[-1])
                    cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0)

                if restart:
                    with And("restarting ClickHouse"):
                        node.restart_clickhouse(safe=False, user=user)
                        node.restart_clickhouse(safe=False, user=user)
        else:
            with Finally(f"I change {config.name}"):
                with By("changing invalid configuration file"):
                    system_config_path = os.path.join(cluster.environ["CLICKHOUSE_TESTS_DIR"], "configs", node.name,
                        "config.d", config.path.split("config.d/")[-1])
                    cluster.command(None, f'rm -rf {system_config_path}', timeout=timeout, exitcode=0)
                    command = f"cat <<HEREDOC > {system_config_path}\n{recover_config.content}\nHEREDOC"
                    cluster.command(None, command, timeout=timeout, exitcode=0)

                if restart:
                    with And("restarting ClickHouse"):
                        node.restart_clickhouse(safe=False, user=user)

    with Then("error log should contain the expected error message"):
        started = time.time()
        command = f"tail -n {tail} /var/log/clickhouse-server/clickhouse-server.err.log | grep \"{message}\""
        while time.time() - started < timeout:
            exitcode = node.command(command, steps=False).exitcode
            if exitcode == 0:
                break
            time.sleep(1)
        assert exitcode == 0, error()

def add_config(config, timeout=300, restart=False, modify=False, node=None, user=None, wait_healthy=True,
        check_preprocessed = True):
    """Add dynamic configuration file to ClickHouse.

    :param config: configuration file description
    :param timeout: timeout, default: 300 sec
    :param restart: restart server, default: False
    :param modify: only modify configuration file, default: False
    """
    if node is None:
        node = current().context.node
    cluster = current().context.cluster

    def check_preprocessed_config_is_updated(after_removal=False):
        """Check that preprocessed config is updated.
        """
        started = time.time()
        command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"

        while time.time() - started < timeout:
            exitcode = node.command(command, steps=False).exitcode
            if after_removal:
                if exitcode == 1:
                    break
            else:
                if exitcode == 0:
                    break
            time.sleep(1)

        if settings.debug:
            node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}")

        if after_removal:
            assert exitcode == 1, error()
        else:
            assert exitcode == 0, error()

    def wait_for_config_to_be_loaded(user=None):
        """Wait for config to be loaded.
        """
        if restart:
            with When("I close terminal to the node to be restarted"):
                bash.close()

            with And("I stop ClickHouse to apply the config changes"):
                node.stop_clickhouse(safe=False)

            with And("I get the current log size"):
                cmd = node.cluster.command(None,
                    f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log")
                logsize = cmd.output.split(" ")[0].strip()

            with And("I start ClickHouse back up"):
                node.start_clickhouse(user=user, wait_healthy=wait_healthy)

            with Then("I tail the log file from using previous log size as the offset"):
                bash.prompt = bash.__class__.prompt
                bash.open()
                bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log")

        with Then("I wait for config reload message in the log file"):
            if restart:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
                    timeout=timeout)
            else:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration",
                    timeout=timeout)

    try:
        with Given(f"{config.name}"):
            if settings.debug:
                with When("I output the content of the config"):
                    debug(config.content)

            with node.cluster.shell(node.name) as bash:
                bash.expect(bash.prompt)
                bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                # make sure tail process is launched and started to follow the file
                bash.expect("<==")
                bash.expect("\n")

                with When("I add the config", description=config.path):
                    command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
                    node.command(command, steps=False, exitcode=0)

                if check_preprocessed:
                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                        check_preprocessed_config_is_updated()

                with And("I wait for config to be reloaded"):
                    wait_for_config_to_be_loaded(user=user)

        yield
    finally:
        if not modify:
            with Finally(f"I remove {config.name} on {node.name}"):
                with node.cluster.shell(node.name) as bash:
                    bash.expect(bash.prompt)
                    bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                    # make sure tail process is launched and started to follow the file
                    bash.expect("<==")
                    bash.expect("\n")

                    with By("removing the config file", description=config.path):
                        node.command(f"rm -rf {config.path}", exitcode=0)

                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                        check_preprocessed_config_is_updated(after_removal=True)

                    with And("I wait for config to be reloaded"):
                        wait_for_config_to_be_loaded()

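add_config() is a generator, so its setup/teardown lifecycle is normally driven by the framework as a Given step. Standalone, the same lifecycle can be sketched with contextlib (a sketch under that assumption; the setting name and value are hypothetical):

from contextlib import contextmanager

entries = {"profiles": {"default": {"max_threads": "8"}}}  # hypothetical setting
config = create_xml_config_content(entries, "max_threads.xml")
with contextmanager(add_config)(config, restart=False):
    pass  # the config is applied here and removed on exit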
@TestStep(When)
def copy(self, dest_node, src_path, dest_path, bash=None, binary=False, eof="EOF", src_node=None):
    """Copy file from source to destination node.
    """
    if binary:
        raise NotImplementedError("not yet implemented; need to use base64 encoding")

    bash = self.context.cluster.bash(node=src_node)

    cmd = bash(f"cat {src_path}")

    assert cmd.exitcode == 0, error()
    contents = cmd.output

    dest_node.command(f"cat << {eof} > {dest_path}\n{contents}\n{eof}")

@TestStep(Given)
def add_user_to_group_on_node(self, node=None, group="clickhouse", username="clickhouse"):
    """Add user {username} into group {group}.
    """
    if node is None:
        node = self.context.node

    node.command(f"usermod -g {group} {username}", exitcode=0)


@TestStep(Given)
def change_user_on_node(self, node=None, username="clickhouse"):
    """Change user on node.
    """
    if node is None:
        node = self.context.node
    try:
        node.command(f"su {username}", exitcode=0)
        yield
    finally:
        node.command("exit", exitcode=0)


@TestStep(Given)
def add_user_on_node(self, node=None, groupname=None, username="clickhouse"):
    """Create user on node, optionally specifying the group.
    """
    if node is None:
        node = self.context.node
    try:
        if groupname is None:
            node.command(f"useradd -s /bin/bash {username}", exitcode=0)
        else:
            node.command(f"useradd -g {groupname} -s /bin/bash {username}", exitcode=0)
        yield
    finally:
        node.command(f"deluser {username}", exitcode=0)


@TestStep(Given)
def add_group_on_node(self, node=None, groupname="clickhouse"):
    """Create group on node.
    """
    if node is None:
        node = self.context.node
    try:
        node.command(f"groupadd {groupname}", exitcode=0)
        yield
    finally:
        node.command(f"delgroup {groupname}")


@TestStep(Given)
def create_file_on_node(self, path, content, node=None):
    """Create file on node.

    :param path: file path
    :param content: file content
    """
    if node is None:
        node = self.context.node
    try:
        with By(f"creating file {path}"):
            node.command(f"cat <<HEREDOC > {path}\n{content}\nHEREDOC", exitcode=0)
        yield path
    finally:
        with Finally(f"I remove {path}"):
            node.command(f"rm -rf {path}", exitcode=0)


@TestStep(Given)
def set_envs_on_node(self, envs, node=None):
    """Set environment variables on node.

    :param envs: dictionary of env variables key=value
    """
    if node is None:
        node = self.context.node
    try:
        with By("setting envs"):
            for key, value in envs.items():
                node.command(f"export {key}={value}", exitcode=0)
        yield
    finally:
        with Finally(f"I unset envs"):
            for key in envs:
                node.command(f"unset {key}", exitcode=0)

@ -21,7 +21,7 @@ xfails = {
    RQ_SRS_016_Kerberos("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse Kerberos authentication test regression module.
    """
    nodes = {
@ -31,6 +31,7 @@ def regression(self, local, clickhouse_binary_path, stress=None):

    if stress is not None:
        self.context.stress = stress
    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "kerberos_env")) as cluster:

@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows to postpone
  # 'docker-compose up -d' till all dependencies go healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows to postpone
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -37,20 +37,28 @@ xfails = {
    RQ_SRS_007_LDAP_Authentication("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse integration with LDAP regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_authentication_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"))
@ -2,7 +2,6 @@
import random
import time

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error
from ldap.authentication.tests.common import *

@ -14,33 +14,7 @@ import testflows.settings as settings
from testflows.core import *
from testflows.asserts import error

def getuid():
    return str(uuid.uuid1()).replace('-', '_')

xml_with_utf8 = '<?xml version="1.0" encoding="utf-8"?>\n'

def xml_indent(elem, level=0, by=" "):
    i = "\n" + level * by
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + by
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            xml_indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

def xml_append(root, tag, text):
    element = xmltree.Element(tag)
    element.text = text
    root.append(element)
    return element

Config = namedtuple("Config", "content path name uid preprocessed_name")
from helpers.common import xml_indent, xml_with_utf8, xml_append, add_config, getuid, Config

ASCII_CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits

@ -78,115 +52,6 @@ def restart(node=None, safe=False, timeout=300):
            f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
            timeout=timeout)

def add_config(config, timeout=300, restart=False, modify=False):
    """Add dynamic configuration file to ClickHouse.

    :param node: node
    :param config: configuration file description
    :param timeout: timeout, default: 20 sec
    """
    node = current().context.node
    cluster = current().context.cluster

    def check_preprocessed_config_is_updated(after_removal=False):
        """Check that preprocessed config is updated.
        """
        started = time.time()
        command = f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name} | grep {config.uid}{' > /dev/null' if not settings.debug else ''}"

        while time.time() - started < timeout:
            exitcode = node.command(command, steps=False).exitcode
            if after_removal:
                if exitcode == 1:
                    break
            else:
                if exitcode == 0:
                    break
            time.sleep(1)

        if settings.debug:
            node.command(f"cat /var/lib/clickhouse/preprocessed_configs/{config.preprocessed_name}")

        if after_removal:
            assert exitcode == 1, error()
        else:
            assert exitcode == 0, error()

    def wait_for_config_to_be_loaded():
        """Wait for config to be loaded.
        """
        if restart:
            with When("I close terminal to the node to be restarted"):
                bash.close()

            with And("I stop ClickHouse to apply the config changes"):
                node.stop(safe=False)

            with And("I get the current log size"):
                cmd = node.cluster.command(None,
                    f"stat --format=%s {cluster.environ['CLICKHOUSE_TESTS_DIR']}/_instances/{node.name}/logs/clickhouse-server.log")
                logsize = cmd.output.split(" ")[0].strip()

            with And("I start ClickHouse back up"):
                node.start()

            with Then("I tail the log file from using previous log size as the offset"):
                bash.prompt = bash.__class__.prompt
                bash.open()
                bash.send(f"tail -c +{logsize} -f /var/log/clickhouse-server/clickhouse-server.log")

        with Then("I wait for config reload message in the log file"):
            if restart:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/config.xml', performed update on configuration",
                    timeout=timeout)
            else:
                bash.expect(
                    f"ConfigReloader: Loaded config '/etc/clickhouse-server/{config.preprocessed_name}', performed update on configuration",
                    timeout=timeout)

    try:
        with Given(f"{config.name}"):
            if settings.debug:
                with When("I output the content of the config"):
                    debug(config.content)

            with node.cluster.shell(node.name) as bash:
                bash.expect(bash.prompt)
                bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                # make sure tail process is launched and started to follow the file
                bash.expect("<==")
                bash.expect("\n")

                with When("I add the config", description=config.path):
                    command = f"cat <<HEREDOC > {config.path}\n{config.content}\nHEREDOC"
                    node.command(command, steps=False, exitcode=0)

                with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                    check_preprocessed_config_is_updated()

                with And("I wait for config to be reloaded"):
                    wait_for_config_to_be_loaded()
        yield
    finally:
        if not modify:
            with Finally(f"I remove {config.name}"):
                with node.cluster.shell(node.name) as bash:
                    bash.expect(bash.prompt)
                    bash.send("tail -v -n 0 -f /var/log/clickhouse-server/clickhouse-server.log")
                    # make sure tail process is launched and started to follow the file
                    bash.expect("<==")
                    bash.expect("\n")

                    with By("removing the config file", description=config.path):
                        node.command(f"rm -rf {config.path}", exitcode=0)

                    with Then(f"{config.preprocessed_name} should be updated", description=f"timeout {timeout}"):
                        check_preprocessed_config_is_updated(after_removal=True)

                    with And("I wait for config to be reloaded"):
                        wait_for_config_to_be_loaded()

def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml"):
    """Create LDAP servers configuration content.
    """
@ -210,19 +75,19 @@ def create_ldap_servers_config_content(servers, config_d_dir="/etc/clickhouse-se
    return Config(content, path, name, uid, "config.xml")

@contextmanager
def modify_config(config, restart=False):
def modify_config(config, restart=False, node=None):
    """Apply updated configuration file.
    """
    return add_config(config, restart=restart, modify=True)
    return add_config(config, restart=restart, modify=True, node=node)

@contextmanager
def ldap_servers(servers, config_d_dir="/etc/clickhouse-server/config.d", config_file="ldap_servers.xml",
        timeout=300, restart=False, config=None):
        timeout=300, restart=False, config=None, node=None):
    """Add LDAP servers configuration.
    """
    if config is None:
        config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-server/users.d", config_file="ldap_users.xml"):
    """Create LDAP users configuration file content.
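For orientation between the hunks above and below: a minimal sketch of how these context managers are typically combined inside a testflows step. The server definition and the `myuser` credentials here are illustrative assumptions, not values taken from this diff:

```python
# Hypothetical usage sketch: register an LDAP server definition and an
# LDAP-authenticated user on the current node, then run a query as that user
# while both dynamic configs are loaded.
servers = {"openldap1": {
    "host": "openldap1", "port": "389", "enable_tls": "no",
    "auth_dn_prefix": "cn=", "auth_dn_suffix": ",ou=users,dc=company,dc=com"}}

with ldap_servers(servers):
    with ldap_authenticated_users({"username": "myuser", "password": "myuser", "server": "openldap1"}):
        current().context.node.query("SELECT currentUser()",
            settings=[("user", "myuser"), ("password", "myuser")])
```

Both context managers funnel into `add_config`, so the config files are removed again on exit unless `modify=True` is passed.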
@ -247,11 +112,12 @@ def create_ldap_users_config_content(*users, config_d_dir="/etc/clickhouse-serve

    return Config(content, path, name, uid, "users.xml")

def add_users_identified_with_ldap(*users):
def add_users_identified_with_ldap(*users, node=None):
    """Add one or more users that are identified via
    an ldap server using RBAC.
    """
    node = current().context.node
    if node is None:
        node = current().context.node
    try:
        with Given("I create users"):
            for user in users:
@ -265,17 +131,20 @@ def add_users_identified_with_ldap(*users):

@contextmanager
def ldap_authenticated_users(*users, config_d_dir="/etc/clickhouse-server/users.d",
        config_file=None, timeout=300, restart=True, config=None, rbac=False):
        config_file=None, timeout=300, restart=True, config=None, rbac=False, node=None):
    """Add LDAP authenticated users.
    """
    if node is None:
        node = current().context.node

    if rbac:
        return add_users_identified_with_ldap(*users)
        return add_users_identified_with_ldap(*users, node=node)
    else:
        if config_file is None:
            config_file = f"ldap_users_{getuid()}.xml"
        if config is None:
            config = create_ldap_users_config_content(*users, config_d_dir=config_d_dir, config_file=config_file)
        return add_config(config, timeout=timeout, restart=restart)
        return add_config(config, timeout=timeout, restart=restart, node=node)

def invalid_server_config(servers, message=None, tail=30, timeout=300):
    """Check that ClickHouse errors when trying to load invalid LDAP servers configuration file.

@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows to postpone
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 10
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,162 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/users.d:/etc/clickhouse-server/users.d"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows to postpone
  # 'docker-compose up -d' until all dependencies become healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -8,6 +8,7 @@ append_path(sys.path, "..", "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.external_user_directory.requirements import *
from helpers.common import check_clickhouse_version

# Cross-outs of known fails
xfails = {
@ -27,6 +28,11 @@ xfails = {
        [(Fail, "can't get it to work")]
}

ffails = {
    "user authentications/verification cooldown performance/:":
        (Skip, "causes timeout on 21.8", (lambda test: check_clickhouse_version(">=21.8")(test) and check_clickhouse_version("<21.9")(test)))
}

@TestFeature
@Name("external user directory")
@ArgumentParser(argparser)
@ -37,20 +43,29 @@ xfails = {
    RQ_SRS_009_LDAP_ExternalUserDirectory_Authentication("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
@FFails(ffails)
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse LDAP external user directory regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_external_user_directory_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"))
@ -1,7 +1,6 @@
# -*- coding: utf-8 -*-
import random

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error

@ -27,8 +27,9 @@ def table(name, create_statement, on_cluster=False):
        node.query(f"DROP TABLE IF EXISTS {name}")

@contextmanager
def rbac_users(*users):
    node = current().context.node
def rbac_users(*users, node=None):
    if node is None:
        node = current().context.node
    try:
        with Given("I have local users"):
            for user in users:
@ -42,8 +43,9 @@ def rbac_users(*users):
        node.query(f"DROP USER IF EXISTS {user['cn']}")

@contextmanager
def rbac_roles(*roles):
    node = current().context.node
def rbac_roles(*roles, node=None):
    if node is None:
        node = current().context.node
    try:
        with Given("I have roles"):
            for role in roles:
@ -1,6 +1,5 @@
import random

from helpers.common import Pool
from testflows.core import *
from testflows.asserts import error

@ -4,16 +4,17 @@ from testflows.core import *

append_path(sys.path, "..")

from helpers.common import Pool, join
from helpers.argparser import argparser

@TestModule
@Name("ldap")
@ArgumentParser(argparser)
def regression(self, local, clickhouse_binary_path, parallel=None, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse LDAP integration regression module.
    """
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path}
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "clickhouse_version": clickhouse_version}

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
@ -2,7 +2,8 @@
<openSSL>
    <server>
        <certificateFile>/etc/clickhouse-server/ssl/server.crt</certificateFile>
        <privateKeyFile>/etc/clickhouse-server/ssl/server.key</privateKeyFile>
        <privateKeyFile>/etc/clickhouse-server/ssl/server.key</privateKeyFile>
        <dhParamsFile>/etc/clickhouse-server/ssl/dhparam.pem</dhParamsFile>
        <verificationMode>none</verificationMode>
        <cacheSessions>true</cacheSessions>
    </server>

@ -0,0 +1,4 @@
<clickhouse>
    <users>
    </users>
</clickhouse>
@ -8,13 +8,22 @@ append_path(sys.path, "..", "..")
from helpers.cluster import Cluster
from helpers.argparser import argparser
from ldap.role_mapping.requirements import *
from helpers.common import check_clickhouse_version

# Cross-outs of known fails
xfails = {
    "mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "user dn detection/mapping/roles removed and added in parallel":
        [(Fail, "known bug")]
    "mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "user dn detection/mapping/roles removed and added in parallel":
        [(Fail, "known bug")],
    "cluster secret/external user directory/:/:/cluster with secret/ldap user/:mapped True/select using mapped role/with privilege on source and distributed":
        [(Fail, "https://github.com/ClickHouse/ClickHouse/issues/34130")]
}

# Force results without running the test
ffails = {
    "cluster secret":
        (Skip, "feature available on 20.10+", check_clickhouse_version("<20.10"))
}

@TestFeature
@ -27,26 +36,36 @@
    RQ_SRS_014_LDAP_RoleMapping("1.0")
)
@XFails(xfails)
def regression(self, local, clickhouse_binary_path, stress=None, parallel=None):
@FFails(ffails)
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse LDAP role mapping regression module.
    """
    nodes = {
        "clickhouse": ("clickhouse1", "clickhouse2", "clickhouse3"),
    }

    self.context.clickhouse_version = clickhouse_version

    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel

    from platform import processor as current_cpu

    folder_name = os.path.basename(current_dir())
    if current_cpu() == 'aarch64':
        env = f"{folder_name}_env_arm64"
    else:
        env = f"{folder_name}_env"

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "ldap_role_mapping_env")) as cluster:
            docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
        self.context.cluster = cluster

        Scenario(run=load("ldap.authentication.tests.sanity", "scenario"), name="ldap sanity")
        Feature(run=load("ldap.role_mapping.tests.server_config", "feature"))
        Feature(run=load("ldap.role_mapping.tests.mapping", "feature"))
        #Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature"))
        Feature(run=load("ldap.role_mapping.tests.user_dn_detection", "feature"))
        Feature(run=load("ldap.role_mapping.tests.cluster_secret", "feature"))

if main():
    regression()

@ -76,6 +76,8 @@
* 4.8.8.3 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithutf8characters)
* 4.8.8.4 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialxmlcharacters)
* 4.8.8.5 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialregexcharacters)
* 4.9 [Cluster With And Without Secret](#cluster-with-and-without-secret)
* 4.9.8.1 [RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable](#rqsrs-014ldapclusterwithandwithoutsecretdistributedtable)
* 5 [References](#references)

## Revision History
@ -548,6 +550,67 @@ version: 1.0
[ClickHouse] SHALL support regex special characters as the value of the `<prefix>` parameter in
the `<user directories><ldap><role_mapping>` section of the `config.xml`.

### Cluster With And Without Secret

##### RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable
version: 1.0

[ClickHouse] SHALL support propagating query user roles and their corresponding privileges
when using `Distributed` table to the remote servers for the users that are authenticated
using LDAP either via external user directory or defined in `users.xml` when
cluster is configured with and without `<secret>`.

For example,

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <secret>qwerty123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

or

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

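A sketch of how a test might exercise this requirement (all names below, such as `dist_table` and `ldap_user`, are illustrative assumptions, not part of the specification):

```python
# Hypothetical testflows step: an LDAP-authenticated user selects from a
# Distributed table; the user's (mapped) roles must propagate to the remote
# shards for the privileged read to succeed, with or without <secret>.
node = self.context.cluster.node("clickhouse1")
node.query("SELECT count() FROM dist_table",
    settings=[("user", "ldap_user"), ("password", "ldap_password")],
    exitcode=0)
```
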
## References

* **Access Control and Account Management**: https://clickhouse.com/docs/en/operations/access-rights/
@ -1,6 +1,6 @@
# These requirements were auto generated
# from software requirements specification (SRS)
# document by TestFlows v1.6.210505.1133630.
# document by TestFlows v1.7.220210.1155232.
# Do not edit by hand but re-generate instead
# using 'tfs requirements generate' command.
from testflows.core import Specification
@ -913,6 +913,75 @@ RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithS
    level=4,
    num='4.8.8.5')

RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable = Requirement(
    name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support propagating query user roles and their corresponding privileges\n'
        'when using `Distributed` table to the remote servers for the users that are authenticated\n'
        'using LDAP either via external user directory or defined in `users.xml` when\n'
        'cluster is configured with and without `<secret>`.\n'
        '\n'
        'For example,\n'
        '\n'
        '```xml\n'
        '<clickhouse>\n'
        '    <remote_servers>\n'
        '        <cluster>\n'
        '            <secret>qwerty123</secret>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host1</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host2</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '        </cluster>\n'
        '    </remote_servers>\n'
        '</clickhouse>\n'
        '```\n'
        '\n'
        'or \n'
        '\n'
        '```xml\n'
        '<clickhouse>\n'
        '    <remote_servers>\n'
        '        <cluster>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host1</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '            <shard>\n'
        '                <internal_replication>true</internal_replication>\n'
        '                <replica>\n'
        '                    <default_database>dwh</default_database>\n'
        '                    <host>host2</host>\n'
        '                </replica>\n'
        '            </shard>\n'
        '        </cluster>\n'
        '    </remote_servers>\n'
        '</clickhouse>\n'
        '```\n'
        '\n'
        ),
    link=None,
    level=4,
    num='4.9.8.1')

SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
    name='SRS-014 ClickHouse LDAP Role Mapping',
    description=None,
@ -1003,6 +1072,8 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters', level=4, num='4.8.8.3'),
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters', level=4, num='4.8.8.4'),
        Heading(name='RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters', level=4, num='4.8.8.5'),
        Heading(name='Cluster With And Without Secret', level=2, num='4.9'),
        Heading(name='RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable', level=4, num='4.9.8.1'),
        Heading(name='References', level=1, num='5'),
        ),
    requirements=(
@ -1056,6 +1127,7 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithUTF8Characters,
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialXMLCharacters,
        RQ_SRS_014_LDAP_RoleMapping_Configuration_UserDirectory_RoleMapping_Prefix_WithSpecialRegexCharacters,
        RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable,
        ),
    content='''
# SRS-014 ClickHouse LDAP Role Mapping
@ -1136,6 +1208,8 @@ SRS_014_ClickHouse_LDAP_Role_Mapping = Specification(
* 4.8.8.3 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithUTF8Characters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithutf8characters)
* 4.8.8.4 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialXMLCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialxmlcharacters)
* 4.8.8.5 [RQ.SRS-014.LDAP.RoleMapping.Configuration.UserDirectory.RoleMapping.Prefix.WithSpecialRegexCharacters](#rqsrs-014ldaprolemappingconfigurationuserdirectoryrolemappingprefixwithspecialregexcharacters)
* 4.9 [Cluster With And Without Secret](#cluster-with-and-without-secret)
* 4.9.8.1 [RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable](#rqsrs-014ldapclusterwithandwithoutsecretdistributedtable)
* 5 [References](#references)

## Revision History
@ -1608,6 +1682,67 @@ version: 1.0
[ClickHouse] SHALL support regex special characters as the value of the `<prefix>` parameter in
the `<user directories><ldap><role_mapping>` section of the `config.xml`.

### Cluster With And Without Secret

##### RQ.SRS-014.LDAP.ClusterWithAndWithoutSecret.DistributedTable
version: 1.0

[ClickHouse] SHALL support propagating query user roles and their corresponding privileges
when using `Distributed` table to the remote servers for the users that are authenticated
using LDAP either via external user directory or defined in `users.xml` when
cluster is configured with and without `<secret>`.

For example,

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <secret>qwerty123</secret>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

or

```xml
<clickhouse>
    <remote_servers>
        <cluster>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host1</host>
                </replica>
            </shard>
            <shard>
                <internal_replication>true</internal_replication>
                <replica>
                    <default_database>dwh</default_database>
                    <host>host2</host>
                </replica>
            </shard>
        </cluster>
    </remote_servers>
</clickhouse>
```

## References

* **Access Control and Account Management**: https://clickhouse.com/docs/en/operations/access-rights/

@ -0,0 +1,37 @@
version: '2.3'

services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/common.xml:/etc/clickhouse-server/users.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/common.xml:/etc/clickhouse-server/config.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/logs.xml:/etc/clickhouse-server/config.d/logs.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ports.xml:/etc/clickhouse-server/config.d/ports.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/remote.xml:/etc/clickhouse-server/config.d/remote.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ssl.xml:/etc/clickhouse-server/config.d/ssl.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/storage.xml:/etc/clickhouse-server/config.d/storage.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/zookeeper.xml:/etc/clickhouse-server/config.d/zookeeper.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/dhparam.pem:/etc/clickhouse-server/ssl/dhparam.pem"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.crt:/etc/clickhouse-server/ssl/server.crt"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.key:/etc/clickhouse-server/ssl/server.key"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,159 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never, custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,37 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    init: true
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d/common.xml:/etc/clickhouse-server/users.d/common.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/logs.xml:/etc/clickhouse-server/config.d/logs.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ports.xml:/etc/clickhouse-server/config.d/ports.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/remote.xml:/etc/clickhouse-server/config.d/remote.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/ssl.xml:/etc/clickhouse-server/config.d/ssl.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/storage.xml:/etc/clickhouse-server/config.d/storage.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d/zookeeper.xml:/etc/clickhouse-server/config.d/zookeeper.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/dhparam.pem:/etc/clickhouse-server/ssl/dhparam.pem"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.crt:/etc/clickhouse-server/ssl/server.crt"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl/server.key:/etc/clickhouse-server/ssl/server.key"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
@ -0,0 +1,159 @@
version: '2.3'

services:
  openldap1:
    # plain text
    extends:
      file: openldap-service.yml
      service: openldap
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap1/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap2:
    # TLS - never
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap2/certs:/container/service/slapd/assets/certs/"

  openldap3:
    # plain text - custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
    environment:
      LDAP_PORT: "3089"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap3/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"

  openldap4:
    # TLS - never, custom port
    extends:
      file: openldap-service.yml
      service: openldap
    expose:
      - "3089"
      - "6036"
    environment:
      LDAP_PORT: "3089"
      LDAPS_PORT: "6036"
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "never"
      LDAP_TLS_CIPHER_SUITE: "SECURE256:+SECURE128:-VERS-TLS-ALL:+VERS-TLS1.2:-RSA:-DHE-DSS:-CAMELLIA-128-CBC:-CAMELLIA-256-CBC"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap4/certs:/container/service/slapd/assets/certs/"

  openldap5:
    # TLS - try
    extends:
      file: openldap-service.yml
      service: openldap
    environment:
      LDAP_TLS: "true"
      LDAP_TLS_CRT_FILENAME: "ldap.crt"
      LDAP_TLS_KEY_FILENAME: "ldap.key"
      LDAP_TLS_DH_PARAM_FILENAME: "dhparam.pem"
      LDAP_TLS_CA_CRT_FILENAME: "ca.crt"
      LDAP_TLS_ENFORCE: "false"
      LDAP_TLS_VERIFY_CLIENT: "try"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/config:/container/service/slapd/assets/config/bootstrap/ldif/custom"
      - "${CLICKHOUSE_TESTS_DIR}/configs/ldap5/certs:/container/service/slapd/assets/certs/"

  phpldapadmin:
    extends:
      file: openldap-service.yml
      service: phpldapadmin
    environment:
      PHPLDAPADMIN_LDAP_HOSTS: "openldap1"
    depends_on:
      openldap1:
        condition: service_healthy

  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
      openldap1:
        condition: service_healthy
      openldap2:
        condition: service_healthy
      openldap3:
        condition: service_healthy
      openldap4:
        condition: service_healthy
      openldap5:
        condition: service_healthy
      phpldapadmin:
        condition: service_healthy
@ -0,0 +1,35 @@
version: '2.3'

services:
  openldap:
    image: osixia/openldap:1.4.0
    command: "--copy-service --loglevel debug"
    environment:
      LDAP_ORGANIZATION: "company"
      LDAP_DOMAIN: "company.com"
      LDAP_ADMIN_PASSWORD: "admin"
      LDAP_TLS: "false"
    expose:
      - "389"
      - "636"
    healthcheck:
      test: ldapsearch -x -H ldap://localhost:$${LDAP_PORT:-389} -b "dc=company,dc=com" -D "cn=admin,dc=company,dc=com" -w admin
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable

  phpldapadmin:
    image: osixia/phpldapadmin:0.9.0
    environment:
      PHPLDAPADMIN_HTTPS: "false"
    healthcheck:
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    security_opt:
      - label:disable
442 tests/testflows/ldap/role_mapping/tests/cluster_secret.py Normal file
@ -0,0 +1,442 @@
from testflows.core import *
from testflows.asserts import error

from ldap.role_mapping.requirements import *
from ldap.role_mapping.tests.common import *


def cluster_node(name):
    """Get cluster node instance.
    """
    return current().context.cluster.node(name)


@TestStep(Given)
def add_sharded_cluster(self, node, name="sharded_cluster_with_secret", with_secret=True, restart=False):
    """Add configuration of sharded cluster that uses secret.
    """
    entries = {
        "remote_servers": {
            name: []
        }
    }

    if with_secret:
        entries["remote_servers"][name].append(
            {
                "secret": "qwerty123"
            }
        )

    for node_name in self.context.cluster.nodes["clickhouse"]:
        entries["remote_servers"][name].append(
            {
                "shard": {
                    "replica": {
                        "host": node_name,
                        "port": "9000"
                    }
                },
            },
        )

    config = create_xml_config_content(entries=entries, config_file=f"{name}.xml")
    return add_config(config, node=node, restart=restart)


@TestStep(Given)
def create_table(self, on_cluster, name=None, node=None):
    """Create table on cluster.
    """
    if node is None:
        node = self.context.node
    if name is None:
        name = getuid()

    try:
        node.query(
            f"CREATE TABLE {name} ON CLUSTER {on_cluster} (d Date, a String, b UInt8, x String, y Int8) "
            f"ENGINE = ReplicatedMergeTree('/clickhouse/tables/{{shard}}/{name}', '{{replica}}') "
            "PARTITION BY y ORDER BY (d, b)"
        )
        yield name
    finally:
        with Finally(f"I drop table {name} on cluster {on_cluster} on {node.name}"):
            node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster} SYNC")


@TestStep(Given)
def create_distributed_table(self, on_cluster, over, name=None, node=None):
    """Create distributed table on cluster over some underlying table.
    """
    if node is None:
        node = self.context.node
    if name is None:
        name = getuid()

    try:
        node.query(f"CREATE TABLE {name} ON CLUSTER {on_cluster} AS {over} "
            f"ENGINE = Distributed({on_cluster}, default, {over}, rand())")
        yield name
    finally:
        with Finally(f"I drop table {name} on cluster {on_cluster} on {node.name}"):
            node.query(f"DROP TABLE IF EXISTS {name} ON CLUSTER {on_cluster} SYNC")


@TestStep(Given)
def grant_select(self, cluster, privilege, role_or_user, node=None):
    """Grant select privilege on a table on a given cluster
    to a role or a user.
    """
    if node is None:
        node = self.context.node

    try:
        node.query(f"GRANT ON CLUSTER {cluster} {privilege} TO {role_or_user}")
        yield
    finally:
        with Finally(f"I remove privilege '{privilege}' on {cluster} from {role_or_user}"):
            node.query(f"REVOKE ON CLUSTER {cluster} {privilege} FROM {role_or_user}")


@TestScenario
def select_using_mapped_role(self, cluster, role_name, role_mapped, user):
    """Check accessing normal and distributed table using
    a user and the specified role that is either granted
    rights to access the tables or not and is or is not assigned to the user
    from all cluster nodes.
    """
    # default cluster node
    node = cluster_node("clickhouse1")

    query_settings = [
        ("user", user["username"]), ("password", user["password"])
    ]

    with Given(f"I create base table on cluster {cluster}"):
        src_table = create_table(on_cluster=cluster, node=node)

    with And(f"I create distributed table over base table on cluster {cluster}"):
        dist_table = create_distributed_table(on_cluster=cluster, over=src_table, node=node)

    with And("I check the grants for the user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            for attempt in retries(timeout=10):
                with attempt:
                    with By(f"executing query on node {name}", flags=TE):
                        r = self.context.cluster.node(name).query(f"SHOW GRANTS", settings=query_settings)
                        if role_mapped:
                            with Then("check that the role is mapped"):
                                assert role_name in r.output, error()

    with Example("no privilege on source table"):
        with When("user tries to read from the source table without privilege"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")

    with Example("with privilege on source table"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with Then("user should be able to read from the source table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {src_table}", settings=query_settings,
                        exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:")

    with Example("with privilege only on distributed table"):
        with Given("I grant SELECT on distributed table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node)

        with Then("user should still not be able to read from distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")

    with Example("with privilege only on source but not on distributed table"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with Then("user should still not be able to read from distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=241, message=f"DB::Exception:")

    with Example("with privilege on source and distributed"):
        with Given("I grant SELECT on source table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {src_table}", role_or_user=role_name, node=node)

        with And("I grant SELECT on distributed table to the mapped role"):
            grant_select(cluster=cluster, privilege=f"SELECT ON {dist_table}", role_or_user=role_name, node=node)

        with Then("user should be able to read from the distributed table"):
            for name in self.context.cluster.nodes["clickhouse"]:
                with By(f"executing query on node {name}", flags=TE):
                    self.context.cluster.node(name).query(f"SELECT * FROM {dist_table}", settings=query_settings,
                        exitcode=0 if role_mapped else 241, message="" if role_mapped else "DB::Exception:")


@TestFeature
def execute_tests(self, role_name, role_mapped, ldap_user, local_user):
    """Execute all scenarios on cluster with or without secret
    for LDAP and local users, using a role that might be
    mapped or not.
    """
    for cluster_type in ["with secret", "without secret"]:
        with Feature("cluster " + cluster_type):
            for user in [ldap_user, local_user]:
                with Feature(user["type"]):
                    with Feature(f"role {role_name} mapped {role_mapped}"):
                        if role_mapped and user["type"] == "local user":
                            with Given(f"I grant role {role_name} to local RBAC user"):
                                for name in self.context.cluster.nodes["clickhouse"]:
                                    with By(f"on node {name}"):
                                        cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}")

                        for scenario in ordered(loads(current_module(), Scenario)):
                            scenario(cluster="sharded_cluster_" + cluster_type.replace(" ", "_"),
                                role_name=role_name, role_mapped=role_mapped, user=user)


@TestOutline(Feature)
def outline_using_external_user_directory(self, ldap_servers, mapping, ldap_roles_or_groups, rbac_roles, mapped_roles):
    """Check using simple and distributed table access when using
    LDAP external user directory or LDAP authenticated existing RBAC users
    with and without cluster secret.

    Where mapping can be one of the following:
    'static' or 'dynamic' or 'dynamic and static'
    """
    ldap_user = {
        "type": "ldap user",
        "server": "openldap1",
        "username": "user1",
        "password": "user1",
        "dn": "cn=user1,ou=users,dc=company,dc=com",
    }

    local_user = {
        "type": "local user",
        "username": "local_user1",
        "password": "local_user1"
    }

    role_mappings = [
        {
            "base_dn": "ou=groups,dc=company,dc=com",
            "attribute": "cn",
            "search_filter": "(&(objectClass=groupOfUniqueNames)(uniquemember={bind_dn}))",
            "prefix": "clickhouse_"
        }
    ]

    if mapping in ["dynamic", "dynamic and static"]:
        with Given("I add LDAP groups"):
            for name in ldap_servers:
                for group_name in ldap_roles_or_groups:
                    with By(f"adding {group_name}"):
                        ldap_groups = add_ldap_groups(groups=({"cn": group_name},), node=cluster_node(name))

                    with And("I add LDAP user to the group"):
                        add_user_to_group_in_ldap(user=ldap_user, group=ldap_groups[0], node=cluster_node(name))

    with Given(f"I add LDAP external user directory configuration with {mapping} role mapping"):
        for name in self.context.cluster.nodes["clickhouse"]:
            if mapping == "dynamic":
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", role_mappings=role_mappings,
                    restart=True, node=cluster_node(name))
            elif mapping == "dynamic and static":
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", role_mappings=role_mappings,
                    roles=ldap_roles_or_groups,
                    restart=True, node=cluster_node(name))
            else:
                By(f"on node {name}", test=add_ldap_external_user_directory, parallel=True)(
                    server="openldap1", roles=ldap_roles_or_groups,
                    restart=True, node=cluster_node(name))

    with And("I add local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_users(users=[local_user], node=cluster_node(name))

    with And("I add RBAC roles on cluster"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_roles(roles=rbac_roles, node=cluster_node(name))

    for role_name in rbac_roles:
        execute_tests(role_name=role_name, role_mapped=(role_name in mapped_roles), ldap_user=ldap_user, local_user=local_user)


@TestFeature
def using_authenticated_users(self, ldap_servers):
    """Check using simple and distributed table access when using
    LDAP authenticated existing users with and without cluster secret.
    """
    role_name = f"role_{getuid()}"

    ldap_user = {
        "type": "ldap authenticated user",
        "cn": "myuser",
        "username": "myuser",
        "userpassword": "myuser",
        "password": "myuser",
        "server": "openldap1"
    }

    local_user = {
        "type": "local user",
        "username": "local_user2",
        "password": "local_user2"
    }

    with Given("I add LDAP user"):
        add_user = {
            "cn": ldap_user["cn"],
            "userpassword": ldap_user["userpassword"],
        }
        for name in ldap_servers:
            add_ldap_users(users=[add_user], node=cluster_node(name))

    with And("I add LDAP authenticated users configuration"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"on node {name}", test=add_ldap_authenticated_users, parallel=True)(
                users=[ldap_user], rbac=True, node=cluster_node(name))

    with And("I add local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_users(users=[local_user], node=cluster_node(name))

    with And("I add RBAC role on cluster that user will use"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                add_rbac_roles(roles=(f"{role_name}",), node=cluster_node(name))

    with And("I grant role to LDAP authenticated user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                cluster_node(name).query(f"GRANT {role_name} TO {ldap_user['username']}")

    with And("I grant role to local RBAC user"):
        for name in self.context.cluster.nodes["clickhouse"]:
            with By(f"on node {name}"):
                cluster_node(name).query(f"GRANT {role_name} TO {local_user['username']}")

    execute_tests(role_name=role_name, role_mapped=role_name, ldap_user=ldap_user, local_user=local_user)


@TestFeature
def using_external_user_directory(self, ldap_servers):
    """Check using LDAP external user directory with different
    role mapping modes and different cases of role existence.
    """
    uid = getuid()

    for mapping in ["dynamic", "static", "dynamic and static"]:
        with Example(f"{mapping}"):
            with Example("all mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}", f"role2_{uid}", f"role3_{uid}"]

                rbac_roles = [f"role0_{uid}", f"role1_{uid}"]
                mapped_roles = [f"role0_{uid}", f"role1_{uid}"]

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("some mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"]

                rbac_roles = [f"role0_{uid}", f"role_not_mapped_{uid}", f"role2_{uid}"]

                if mapping == "dynamic and static":
                    mapped_roles = [f"role0_{uid}", f"role2_{uid}"]
                else:
                    mapped_roles = [f"role0_{uid}"]

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("no mapped roles exist"):
                if mapping == "dynamic":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}"]
                elif mapping == "dynamic and static":
                    ldap_roles_or_groups = [f"clickhouse_role0_{uid}", f"clickhouse_role1_{uid}", f"role2_{uid}", f"role3_{uid}"]
                else:
                    ldap_roles_or_groups = [f"role0_{uid}", f"role1_{uid}"]

                rbac_roles = [f"role_not_mapped0_{uid}", f"role_not_mapped1_{uid}"]
                mapped_roles = []

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)

            with Example("empty roles"):
                ldap_roles_or_groups = []
                rbac_roles = [f"role0_{uid}", f"role1_{uid}"]
                mapped_roles = []

                outline_using_external_user_directory(ldap_servers=ldap_servers,
                    mapping=mapping, ldap_roles_or_groups=ldap_roles_or_groups,
                    rbac_roles=rbac_roles, mapped_roles=mapped_roles)


@TestFeature
@Name("cluster secret")
@Requirements(
    RQ_SRS_014_LDAP_ClusterWithAndWithoutSecret_DistributedTable("1.0")
)
def feature(self):
    """Check using Distributed table when cluster is configured with and without secret
    using users authenticated via LDAP either through external user directory
    or defined using RBAC with LDAP server authentication.
    """
    ldap_servers = {
        "openldap1": {
            "host": "openldap1",
            "port": "389",
            "enable_tls": "no",
            "bind_dn": "cn={user_name},ou=users,dc=company,dc=com"
        },
    }

    with Given("I fix LDAP access permissions"):
        for name in ldap_servers:
            fix_ldap_permissions(node=cluster_node(name))

    with And("I add LDAP servers configuration on all nodes", description=f"{ldap_servers}"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"on node {name}", test=add_ldap_servers_configuration, parallel=True)(
                servers=ldap_servers, node=cluster_node(name))

    with And("I add sharded cluster that uses secrets on all the nodes"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)(
                node=cluster_node(name), name="sharded_cluster_with_secret", with_secret=True)

    with And("I add sharded cluster that does not use secrets on all the nodes"):
        for name in self.context.cluster.nodes["clickhouse"]:
            By(f"adding configuration on {name}", test=add_sharded_cluster, parallel=True)(
                node=cluster_node(name), name="sharded_cluster_without_secret", with_secret=False)

    Feature("external user directory", test=using_external_user_directory)(ldap_servers=ldap_servers)
    Feature("authenticated users", test=using_authenticated_users)(ldap_servers=ldap_servers)
@ -3,15 +3,16 @@ import os
from testflows.core import *
from testflows.asserts import error

from ldap.authentication.tests.common import getuid, create_ldap_servers_config_content, add_config, Config
from helpers.common import create_xml_config_content, add_config
from ldap.authentication.tests.common import getuid, create_ldap_servers_config_content, ldap_authenticated_users
from ldap.external_user_directory.tests.common import rbac_roles, rbac_users, ldap_users
from ldap.authentication.tests.common import xmltree, xml_indent, xml_append, xml_with_utf8

@TestStep(Given)
def create_table(self, name, create_statement, on_cluster=False):
def create_table(self, name, create_statement, on_cluster=False, node=None):
    """Create table.
    """
    node = current().context.node
    if node is None:
        node = current().context.node
    try:
        with Given(f"I have a {name} table"):
            node.query(create_statement.format(name=name))
@ -25,12 +26,12 @@ def create_table(self, name, create_statement, on_cluster=False):

@TestStep(Given)
def add_ldap_servers_configuration(self, servers, config=None, config_d_dir="/etc/clickhouse-server/config.d",
        config_file="ldap_servers.xml", timeout=60, restart=False):
        config_file="ldap_servers.xml", timeout=60, restart=False, node=None):
    """Add LDAP servers configuration to config.xml.
    """
    if config is None:
        config = create_ldap_servers_config_content(servers, config_d_dir, config_file)
    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

@TestStep(Given)
def add_ldap_groups(self, groups, node=None):
@ -50,7 +51,7 @@ def add_ldap_groups(self, groups, node=None):
@TestStep(Given)
def add_ldap_external_user_directory(self, server, roles=None, role_mappings=None,
        config_d_dir="/etc/clickhouse-server/config.d",
        config_file=None, timeout=60, restart=True, config=None):
        config_file=None, timeout=60, restart=True, config=None, node=None):
    """Add LDAP external user directory.
    """
    if config_file is None:
@ -60,21 +61,35 @@ def add_ldap_external_user_directory(self, server, roles=None, role_mappings=Non
        config = create_ldap_external_user_directory_config_content(server=server, roles=roles,
            role_mappings=role_mappings, config_d_dir=config_d_dir, config_file=config_file)

    return add_config(config, restart=restart)
    return add_config(config, restart=restart, node=node)

@TestStep(Given)
def add_rbac_roles(self, roles):
def add_rbac_roles(self, roles, node=None):
    """Add RBAC roles.
    """
    with rbac_roles(*roles) as _roles:
    with rbac_roles(*roles, node=node) as _roles:
        yield _roles

@TestStep(Given)
def add_rbac_users(self, users):
def add_rbac_users(self, users, node=None):
    """Add RBAC users.
    """
    with rbac_users(*users) as _users:
        yield _users
    if node is None:
        node = self.context.node
    try:
        with Given(f"I create local users on {node}"):
            for user in users:
                username = user.get('username', None) or user['cn']
                password = user.get('password', None) or user['userpassword']
                with By(f"creating user {username}"):
                    node.query(f"CREATE USER OR REPLACE {username} IDENTIFIED WITH PLAINTEXT_PASSWORD BY '{password}'")
        yield users
    finally:
        with Finally(f"I drop local users on {node}"):
            for user in users:
                username = user.get('username', None) or user['cn']
                with By(f"dropping user {username}", flags=TE):
                    node.query(f"DROP USER IF EXISTS {username}")

@TestStep(Given)
def add_ldap_users(self, users, node=None):
@ -83,6 +98,16 @@ def add_ldap_users(self, users, node=None):
    with ldap_users(*users, node=node) as _users:
        yield _users

@TestStep(Given)
def add_ldap_authenticated_users(self, users, config_file=None, rbac=False, node=None, restart=True):
    """Add LDAP authenticated users.
    """
    if config_file is None:
        config_file = f"ldap_users_{getuid()}.xml"

    with ldap_authenticated_users(*users, config_file=config_file, restart=restart, rbac=rbac, node=node):
        yield users

def add_group_to_ldap(cn, gidnumber=None, node=None, _gidnumber=[600], exitcode=0):
    """Add new group entry to LDAP.
    """
@ -193,39 +218,11 @@ def delete_user_from_group_in_ldap(user, group, node=None, exitcode=0):
    if exitcode is not None:
        assert r.exitcode == exitcode, error()

def create_xml_config_content(entries, config_d_dir="/etc/clickhouse-server/config.d",
        config_file="ldap_external_user_directories.xml"):
    """Create XML configuration file from a dictionary.
    """
    uid = getuid()
    path = os.path.join(config_d_dir, config_file)
    name = config_file
    root = xmltree.Element("yandex")
    root.append(xmltree.Comment(text=f"config uid: {uid}"))

    def create_xml_tree(entries, root):
        for k,v in entries.items():
            if type(v) is dict:
                xml_element = xmltree.Element(k)
                create_xml_tree(v, xml_element)
                root.append(xml_element)
            elif type(v) in (list, tuple):
                xml_element = xmltree.Element(k)
                for e in v:
                    create_xml_tree(e, xml_element)
                root.append(xml_element)
            else:
                xml_append(root, k, v)

    create_xml_tree(entries, root)
    xml_indent(root)
    content = xml_with_utf8 + str(xmltree.tostring(root, short_empty_elements=False, encoding="utf-8"), "utf-8")

    return Config(content, path, name, uid, "config.xml")

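# For reference, assuming xml_with_utf8 above is the standard UTF-8 XML declaration,
# an entries dictionary such as {"user_directories": {"ldap": {"server": "openldap1"}}}
# passed to create_xml_config_content would serialize to roughly the following
# (a sketch; the uid comment is shortened and illustrative):

```xml
<?xml version='1.0' encoding='utf-8'?>
<yandex>
    <!--config uid: 0f3a21c4-->
    <user_directories>
        <ldap>
            <server>openldap1</server>
        </ldap>
    </user_directories>
</yandex>
```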
def create_ldap_external_user_directory_config_content(server=None, roles=None, role_mappings=None, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml")

    entries = {
        "user_directories": {
            "ldap": {
@ -250,4 +247,5 @@ def create_ldap_external_user_directory_config_content(server=None, roles=None,
def create_entries_ldap_external_user_directory_config_content(entries, **kwargs):
    """Create LDAP external user directory configuration file content.
    """
    kwargs["config_file"] = kwargs.pop("config_file", "external_ldap_user_directory.xml")
    return create_xml_config_content(entries, **kwargs)
@ -2,8 +2,6 @@
from testflows.core import *
from testflows.asserts import error

from helpers.common import Pool

from ldap.role_mapping.requirements import *
from ldap.role_mapping.tests.common import *
from ldap.external_user_directory.tests.common import randomword
@ -109,7 +109,7 @@ xflags = {
@Specifications(
    SRS018_ClickHouse_Map_Data_Type
)
def regression(self, local, clickhouse_binary_path, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """Map type regression.
    """
    nodes = {
@ -119,6 +119,7 @@ def regression(self, local, clickhouse_binary_path, stress=None):

    if stress is not None:
        self.context.stress = stress
    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "map_type_env")) as cluster:
@ -3,6 +3,7 @@ version: '2.3'
services:
  clickhouse:
    image: clickhouse/integration-test
    init: true
    expose:
      - "9000"
      - "9009"
@ -15,9 +16,9 @@ services:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    entrypoint: bash -c "tail -f /dev/null"
    healthcheck:
      test: clickhouse client --query='select 1'
      test: echo 1
      interval: 10s
      timeout: 10s
      retries: 3
29 tests/testflows/rbac/rbac_env_arm64/clickhouse-service.yml Executable file
@ -0,0 +1,29 @@
version: '2.3'

services:
  clickhouse:
    image: registry.gitlab.com/altinity-public/container-images/test/clickhouse-integration-test:21.12
    privileged: true
    expose:
      - "9000"
      - "9009"
      - "8123"
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.d:/etc/clickhouse-server/config.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.d:/etc/clickhouse-server/users.d"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/ssl:/etc/clickhouse-server/ssl"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/config.xml:/etc/clickhouse-server/config.xml"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse/users.xml:/etc/clickhouse-server/users.xml"
      - "${CLICKHOUSE_TESTS_SERVER_BIN_PATH:-/usr/bin/clickhouse}:/usr/bin/clickhouse"
      - "${CLICKHOUSE_TESTS_ODBC_BRIDGE_BIN_PATH:-/usr/bin/clickhouse-odbc-bridge}:/usr/bin/clickhouse-odbc-bridge"
    entrypoint: bash -c "clickhouse server --config-file=/etc/clickhouse-server/config.xml --log-file=/var/log/clickhouse-server/clickhouse-server.log --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log"
    healthcheck:
      test: clickhouse client --query='select 1'
      interval: 10s
      timeout: 10s
      retries: 3
      start_period: 300s
    cap_add:
      - SYS_PTRACE
    security_opt:
      - label:disable
60 tests/testflows/rbac/rbac_env_arm64/docker-compose.yml Executable file
@ -0,0 +1,60 @@
version: '2.3'

services:
  zookeeper:
    extends:
      file: zookeeper-service.yml
      service: zookeeper

  clickhouse1:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse1
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse1/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse1/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse2:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse2
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse2/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse2/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  clickhouse3:
    extends:
      file: clickhouse-service.yml
      service: clickhouse
    hostname: clickhouse3
    volumes:
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/database/:/var/lib/clickhouse/"
      - "${CLICKHOUSE_TESTS_DIR}/_instances/clickhouse3/logs/:/var/log/clickhouse-server/"
      - "${CLICKHOUSE_TESTS_DIR}/configs/clickhouse3/config.d/macros.xml:/etc/clickhouse-server/config.d/macros.xml"
    depends_on:
      zookeeper:
        condition: service_healthy

  # dummy service which does nothing, but allows postponing
  # 'docker-compose up -d' until all dependencies are healthy
  all_services_ready:
    image: hello-world
    depends_on:
      clickhouse1:
        condition: service_healthy
      clickhouse2:
        condition: service_healthy
      clickhouse3:
        condition: service_healthy
      zookeeper:
        condition: service_healthy
18 tests/testflows/rbac/rbac_env_arm64/zookeeper-service.yml Executable file
@ -0,0 +1,18 @@
version: '2.3'

services:
  zookeeper:
    image: zookeeper:3.4.12
    expose:
      - "2181"
    environment:
      ZOO_TICK_TIME: 500
      ZOO_MY_ID: 1
    healthcheck:
      test: echo stat | nc localhost 2181
      interval: 3s
      timeout: 2s
      retries: 5
      start_period: 2s
    security_opt:
      - label:disable
@ -9,6 +9,7 @@ append_path(sys.path, "..")
|
||||
from helpers.cluster import Cluster
|
||||
from helpers.argparser import argparser
|
||||
from rbac.requirements import SRS_006_ClickHouse_Role_Based_Access_Control
|
||||
from helpers.common import check_clickhouse_version
|
||||
|
||||
issue_14091 = "https://github.com/ClickHouse/ClickHouse/issues/14091"
|
||||
issue_14149 = "https://github.com/ClickHouse/ClickHouse/issues/14149"
|
||||
@ -144,7 +145,7 @@ xfails = {
|
||||
"views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted directly":
|
||||
[(Fail, issue_26746)],
|
||||
"views/live view/create with join subquery privilege granted directly or via role/create with join subquery, privilege granted through a role":
|
||||
[(Fail, issue_26746)]
|
||||
[(Fail, issue_26746)],
|
||||
}
|
||||
|
||||
xflags = {
|
||||
@ -152,15 +153,23 @@ xflags = {
|
||||
(SKIP, 0)
|
||||
}
|
||||
|
||||
ffails ={
|
||||
"/clickhouse/rbac/privileges/:/table_type='ReplicatedReplacingMergeTree-sharded_cluster":
|
||||
(Skip, "Causes clickhouse timeout on 21.10", (lambda test: check_clickhouse_version(">=21.10")(test) and check_clickhouse_version("<21.11")(test))),
|
||||
"/clickhouse/rbac/views":
|
||||
(Skip, "Does not work on clickhouse 21.09", (lambda test: check_clickhouse_version(">=21.9")(test) and check_clickhouse_version("<21.10")(test)))
|
||||
}
|
||||
|
||||
@TestModule
|
||||
@ArgumentParser(argparser)
|
||||
@XFails(xfails)
|
||||
@XFlags(xflags)
|
||||
@FFails(ffails)
|
||||
@Name("rbac")
|
||||
@Specifications(
|
||||
SRS_006_ClickHouse_Role_Based_Access_Control
|
||||
)
|
||||
def regression(self, local, clickhouse_binary_path, stress=None):
|
||||
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
|
||||
"""RBAC regression.
|
||||
"""
|
||||
nodes = {
|
||||
@ -168,11 +177,21 @@ def regression(self, local, clickhouse_binary_path, stress=None):
|
||||
("clickhouse1", "clickhouse2", "clickhouse3")
|
||||
}
|
||||
|
||||
self.context.clickhouse_version = clickhouse_version
|
||||
|
||||
if stress is not None:
|
||||
self.context.stress = stress
|
||||
|
||||
from platform import processor as current_cpu
|
||||
|
||||
folder_name = os.path.basename(current_dir())
|
||||
if current_cpu() == 'aarch64':
|
||||
env = f"{folder_name}_env_arm64"
|
||||
else:
|
||||
env = f"{folder_name}_env"
|
||||
|
||||
with Cluster(local, clickhouse_binary_path, nodes=nodes,
|
||||
docker_compose_project_dir=os.path.join(current_dir(), "rbac_env")) as cluster:
|
||||
docker_compose_project_dir=os.path.join(current_dir(), env)) as cluster:
|
||||
self.context.cluster = cluster
|
||||
|
||||
Feature(run=load("rbac.tests.syntax.feature", "feature"))
|
||||
|
@ -165,7 +165,7 @@ def url(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
|
||||
|
||||
with Then("I check the user can't use the URL source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1')", settings=[("user",user_name)],
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings=[("user",user_name)],
|
||||
exitcode=exitcode, message=message)
|
||||
|
||||
with Scenario("URL source with privilege"):
|
||||
@ -173,8 +173,7 @@ def url(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
|
||||
|
||||
with Then("I check the user can use the URL source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1')", settings = [("user", f"{user_name}")],
|
||||
exitcode=42, message='Exception: Storage')
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings = [("user", f"{user_name}")])
|
||||
|
||||
with Scenario("URL source with revoked privilege"):
|
||||
with When(f"I grant {privilege}"):
|
||||
@ -184,7 +183,7 @@ def url(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")
|
||||
|
||||
with Then("I check the user cannot use the URL source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1')", settings=[("user",user_name)],
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=URL('127.0.0.1', 'TSV')", settings=[("user",user_name)],
|
||||
exitcode=exitcode, message=message)
|
||||
|
||||
@TestSuite
|
||||
@ -610,8 +609,9 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None):
|
||||
if node is None:
|
||||
node = self.context.node
|
||||
|
||||
table_name = f'table_{getuid()}'
|
||||
|
||||
with Scenario("HDFS source without privilege"):
|
||||
table_name = f'table_{getuid()}'
|
||||
|
||||
with Given("The user has table privilege"):
|
||||
node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}")
|
||||
@ -623,7 +623,7 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
|
||||
|
||||
with Then("I check the user can't use the HDFS source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('127.0.0.1')", settings=[("user",user_name)],
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings=[("user",user_name)],
|
||||
exitcode=exitcode, message=message)
|
||||
|
||||
with Scenario("HDFS source with privilege"):
|
||||
@ -632,8 +632,7 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
|
||||
|
||||
with Then("I check the user can use the HDFS source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('127.0.0.1')", settings = [("user", f"{user_name}")],
|
||||
exitcode=42, message='Exception: Storage')
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings = [("user", f"{user_name}")])
|
||||
|
||||
with Scenario("HDFS source with revoked privilege"):
|
||||
|
||||
@ -644,7 +643,7 @@ def HDFS(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")
|
||||
|
||||
with Then("I check the user cannot use the HDFS source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('127.0.0.1')", settings=[("user",user_name)],
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=HDFS('hdfs://127.0.0.1:8020/path', 'TSV')", settings=[("user",user_name)],
|
||||
exitcode=exitcode, message=message)
|
||||
|
||||
@TestSuite
|
||||
@ -702,8 +701,9 @@ def S3(self, privilege, grant_target_name, user_name, node=None):
|
||||
if node is None:
|
||||
node = self.context.node
|
||||
|
||||
table_name = f'table_{getuid()}'
|
||||
|
||||
with Scenario("S3 source without privilege"):
|
||||
table_name = f'table_{getuid()}'
|
||||
|
||||
with Given("The user has table privilege"):
|
||||
node.query(f"GRANT CREATE TABLE ON {table_name} TO {grant_target_name}")
|
||||
@ -715,7 +715,7 @@ def S3(self, privilege, grant_target_name, user_name, node=None):
|
||||
node.query(f"GRANT USAGE ON *.* TO {grant_target_name}")
|
||||
|
||||
with Then("I check the user can't use the S3 source"):
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('127.0.0.1')", settings=[("user",user_name)],
|
||||
node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings=[("user",user_name)],
|
||||
exitcode=exitcode, message=message)
|
||||
|
||||
with Scenario("S3 source with privilege"):
|
||||
@ -724,8 +724,7 @@ def S3(self, privilege, grant_target_name, user_name, node=None):
        node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")

        with Then("I check the user can use the S3 source"):
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('127.0.0.1')", settings = [("user", f"{user_name}")],
                exitcode=42, message='Exception: Storage')
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings = [("user", f"{user_name}")])

    with Scenario("S3 source with revoked privilege"):

@ -736,7 +735,7 @@ def S3(self, privilege, grant_target_name, user_name, node=None):
        node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")

        with Then("I check the user cannot use the S3 source"):
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('127.0.0.1')", settings=[("user",user_name)],
            node.query(f"CREATE TABLE {table_name} (x String) ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')", settings=[("user",user_name)],
                exitcode=exitcode, message=message)
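
Taken together, the S3 scenarios walk a grant/use/revoke lifecycle around one DDL statement. A condensed sketch, assuming exitcode and message hold the expected privilege-error pair exactly as in the suite:

# Sketch only; the suite uses a fresh table_name per scenario, which the
# DROP below stands in for.
def check_s3_lifecycle(node, table_name, user_name, grant_target_name,
        privilege, exitcode, message):
    ddl = (f"CREATE TABLE {table_name} (x String) "
        "ENGINE=S3('https://my.amazonaws.com/mybucket/mydata', 'TSV')")
    # Granted: the DDL is accepted.
    node.query(f"GRANT {privilege} ON *.* TO {grant_target_name}")
    node.query(ddl, settings=[("user", user_name)])
    node.query(f"DROP TABLE IF EXISTS {table_name}")
    # Revoked: the same DDL is rejected with the expected privilege error.
    node.query(f"REVOKE {privilege} ON *.* FROM {grant_target_name}")
    node.query(ddl, settings=[("user", user_name)], exitcode=exitcode, message=message)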
@TestFeature
@ -4,6 +4,7 @@ from testflows.core import *

import rbac.helper.errors as errors
from rbac.requirements import *
from helpers.common import check_clickhouse_version

@TestFeature
@Name("grant role")
@ -58,7 +59,7 @@ def feature(self, node="clickhouse1"):
        RQ_SRS_006_RBAC_Grant_Role("1.0")]):
        with setup(0,0):
            with When("I grant nonexistent role to a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0")
                node.query("GRANT role0 TO user0", exitcode=exitcode, message=message)

    with Scenario("I grant a role to multiple users", requirements=[
@ -4,6 +4,7 @@ from testflows.core import *

import rbac.helper.errors as errors
from rbac.requirements import *
from helpers.common import check_clickhouse_version

@TestFeature
@Name("revoke role")
@ -70,7 +71,7 @@ def feature(self, node="clickhouse1"):
        RQ_SRS_006_RBAC_Revoke_Role("1.0")]):
        with setup(0,0):
            with When("I revoke nonexistent role from a nonexistent user"):
                exitcode, message = errors.role_not_found_in_disk(name="user0")
                exitcode, message = errors.role_not_found_in_disk(name="user0") if check_clickhouse_version(">=21.09")(self) else errors.role_not_found_in_disk(name="role0")
                node.query("REVOKE role0 FROM user0", exitcode=exitcode, message=message)

    with Scenario("I revoke a role from multiple users", requirements=[
@ -9,12 +9,13 @@ from helpers.argparser import argparser
@TestModule
@Name("clickhouse")
@ArgumentParser(argparser)
def regression(self, local, clickhouse_binary_path, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """ClickHouse regression.
    """
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "stress": stress}
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path, "clickhouse_version": clickhouse_version, "stress": stress}

    self.context.stress = stress
    self.context.clickhouse_version = clickhouse_version

    with Pool(8) as pool:
        try:
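
Everything placed in args is forwarded to each sub-feature, so adding clickhouse_version to both the dict and self.context makes the pinned version visible to parallel features and to version gates alike. A sketch of the forwarding pattern, assuming the usual testflows imports (from testflows.core import *) and a placeholder module path in load():

with Pool(8) as pool:
    try:
        # Placeholder path; each real sub-feature is loaded the same way.
        Feature(test=load("example.regression", "regression"),
            parallel=True, executor=pool)(**args)
    finally:
        join()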
@ -91,7 +91,7 @@ xflags = {
@Requirements(
    RQ_SRS_019_ClickHouse_WindowFunctions("1.0")
)
def regression(self, local, clickhouse_binary_path, stress=None):
def regression(self, local, clickhouse_binary_path, clickhouse_version=None, stress=None):
    """Window functions regression.
    """
    nodes = {
@ -101,6 +101,7 @@ def regression(self, local, clickhouse_binary_path, stress=None):

    if stress is not None:
        self.context.stress = stress
    self.context.clickhouse_version = clickhouse_version

    with Cluster(local, clickhouse_binary_path, nodes=nodes,
            docker_compose_project_dir=os.path.join(current_dir(), "window_functions_env")) as cluster:
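
Both regression entry points now record clickhouse_version on the context, which is what gates like check_clickhouse_version(">=21.09")(self) consult. The helper itself lives in helpers.common; a minimal sketch of compatible semantics (an assumption, the real implementation may parse versions differently):

import operator
import re

def check_clickhouse_version(condition):
    """Return a callable that tests condition against test.context.clickhouse_version."""
    ops = {">=": operator.ge, "<=": operator.le, ">": operator.gt,
        "<": operator.lt, "==": operator.eq}
    op_str, ver_str = re.match(r"\s*([<>=]+)\s*([\d.]+)", condition).groups()
    required = tuple(int(p) for p in ver_str.split("."))

    def check(test):
        current = getattr(test.context, "clickhouse_version", None)
        if current is None:
            return False  # no pinned version, gate does not match
        return ops[op_str](tuple(int(p) for p in current.split(".")), required)

    return check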