Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 07:01:59 +00:00)

Commit e11220beb3: Merge branch 'master' into google-cloud-cpp
34 .github/workflows/nightly.yml (vendored)

@@ -27,7 +27,7 @@ jobs:
        id: runconfig
        run: |
          echo "::group::configure CI run"
-          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --outfile ${{ runner.temp }}/ci_run_data.json
+          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow NightlyBuilds --outfile ${{ runner.temp }}/ci_run_data.json
          echo "::endgroup::"

          echo "::group::CI run configure results"
@@ -44,9 +44,39 @@ jobs:
    with:
      data: "${{ needs.RunConfig.outputs.data }}"
      set_latest: true

+  Builds_1:
+    needs: [RunConfig]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
+    uses: ./.github/workflows/reusable_build_stage.yml
+    with:
+      stage: Builds_1
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_1:
+    needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_1
+      data: ${{ needs.RunConfig.outputs.data }}
+  Builds_2:
+    needs: [RunConfig, Builds_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
+    uses: ./.github/workflows/reusable_build_stage.yml
+    with:
+      stage: Builds_2
+      data: ${{ needs.RunConfig.outputs.data }}
+  Tests_2:
+    needs: [RunConfig, Builds_1, Tests_1]
+    if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
+    uses: ./.github/workflows/reusable_test_stage.yml
+    with:
+      stage: Tests_2
+      data: ${{ needs.RunConfig.outputs.data }}
+
  CheckWorkflow:
    if: ${{ !cancelled() }}
-    needs: [RunConfig, BuildDockers]
+    needs: [RunConfig, BuildDockers, Tests_2]
    runs-on: [self-hosted, style-checker-aarch64]
    steps:
      - name: Check out repository code
3 .gitmodules (vendored)

@@ -1,6 +1,9 @@
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
# more confusing than useful.
+[submodule "contrib/jwt-cpp"]
+    path = contrib/jwt-cpp
+    url = https://github.com/Thalhammer/jwt-cpp
[submodule "contrib/zstd"]
    path = contrib/zstd
    url = https://github.com/facebook/zstd
2 contrib/CMakeLists.txt (vendored)

@@ -221,6 +221,8 @@ add_contrib (numactl-cmake numactl)

add_contrib (google-cloud-cpp-cmake google-cloud-cpp) # requires grpc, protobuf, absl

+add_contrib (jwt-cpp-cmake jwt-cpp)
+
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
1 contrib/jwt-cpp (vendored Submodule)

@@ -0,0 +1 @@
+Subproject commit a6927cb8140858c34e05d1a954626b9849fbcdfc
23 contrib/jwt-cpp-cmake/CMakeLists.txt (new file)

@@ -0,0 +1,23 @@
set(ENABLE_JWT_CPP_DEFAULT OFF)
if(ENABLE_LIBRARIES AND CLICKHOUSE_CLOUD)
    set(ENABLE_JWT_CPP_DEFAULT ON)
endif()

option(ENABLE_JWT_CPP "Enable jwt-cpp library" ${ENABLE_JWT_CPP_DEFAULT})

if (NOT ENABLE_JWT_CPP)
    message(STATUS "Not using jwt-cpp")
    return()
endif()

if(ENABLE_JWT_CPP)
    if(NOT TARGET OpenSSL::Crypto)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use jwt-cpp without OpenSSL")
    endif()
endif()

set (JWT_CPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/jwt-cpp/include")

add_library (_jwt-cpp INTERFACE)
target_include_directories(_jwt-cpp SYSTEM BEFORE INTERFACE ${JWT_CPP_INCLUDE_DIR})
add_library(ch_contrib::jwt-cpp ALIAS _jwt-cpp)
@@ -31,8 +31,8 @@ COPY entrypoint.sh /entrypoint.sh
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
    && case $arch in \
-        amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
-        arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
+        amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.35.so /lib64/ld-linux-x86-64.so.2 ;; \
+        arm64) ln -sf /lib/ld-2.35.so /lib/ld-linux-aarch64.so.1 ;; \
    esac

# lts / testing / prestable / etc
@@ -86,7 +86,8 @@ RUN arch=${TARGETARCH:-amd64} \
ARG DEFAULT_CONFIG_DIR="/etc/clickhouse-keeper"
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse-keeper"
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-keeper"
-RUN mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
+RUN clickhouse-keeper --version \
+    && mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
    && chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
    && chown root:clickhouse "${DEFAULT_LOG_DIR}" \
    && chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}"
46 docs/en/sql-reference/statements/check-grant.md (new file)

@@ -0,0 +1,46 @@
---
slug: /en/sql-reference/statements/check-grant
sidebar_position: 56
sidebar_label: CHECK GRANT
title: "CHECK GRANT Statement"
---

The `CHECK GRANT` query is used to check whether the current user/role has been granted a specific privilege.

## Syntax

The basic syntax of the query is as follows:

```sql
CHECK GRANT privilege[(column_name [,...])] [,...] ON {db.table[*]|db[*].*|*.*|table[*]|*}
```

- `privilege` — Type of privilege.

## Examples

If the user has been granted the privilege, the response `check_grant` will be `1`; otherwise it will be `0`.

If `table_1.col1` exists and the current user has been granted the `SELECT`/`SELECT(col1)` privilege (directly or via a role with that privilege), the response is `1`.
```sql
CHECK GRANT SELECT(col1) ON table_1;
```

```text
┌─result─┐
│      1 │
└────────┘
```
If `table_2.col2` doesn't exist, or the current user has not been granted the `SELECT`/`SELECT(col2)` privilege (directly or via a role with that privilege), the response is `0`.
```sql
CHECK GRANT SELECT(col2) ON table_2;
```

```text
┌─result─┐
│      0 │
└────────┘
```

## Wildcard
When specifying privileges, you can use an asterisk (`*`) instead of a table or database name. Please check [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants) for wildcard rules.
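For the wildcard form, a short illustrative sketch (assuming the same wildcard semantics as `GRANT`; `db1` is a hypothetical database name):

```sql
-- Check SELECT on all tables of db1, using * in place of a table name
CHECK GRANT SELECT ON db1.*
```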
@@ -4215,3 +4215,9 @@ SELECT toFloat64('1.7091'), toFloat64('1.5008753E7') SETTINGS precise_float_pars
│              1.7091 │                 15008753 │
└─────────────────────┴──────────────────────────┘
```
+
+## push_external_roles_in_interserver_queries
+
+Allows passing the user's roles from the query initiator to other nodes when executing a query.
+
+Default value: `true`.
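As a usage sketch, the setting can be toggled like any other session setting (the setting name is the one introduced by this commit):

```sql
-- Disable forwarding of externally granted roles for the current session
SET push_external_roles_in_interserver_queries = 0;
```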
10 docs/zh/sql-reference/statements/check-grant.mdx (new file)

@@ -0,0 +1,10 @@
---
slug: /zh/sql-reference/statements/check-grant
sidebar_position: 56
sidebar_label: CHECK
title: "CHECK GRANT Statement"
---

import Content from '@site/docs/en/sql-reference/statements/check-grant.md';

<Content />
@@ -220,7 +220,7 @@ std::vector<String> Client::loadWarningMessages()
        "" /* query_id */,
        QueryProcessingStage::Complete,
        &client_context->getSettingsRef(),
-        &client_context->getClientInfo(), false, {});
+        &client_context->getClientInfo(), false, {}, {});
    while (true)
    {
        Packet packet = connection->receivePacket();
@@ -366,6 +366,13 @@ void ContextAccess::setUser(const UserPtr & user_) const
        current_roles_with_admin_option = user->granted_roles.findGrantedWithAdminOption(*params.current_roles);
    }

+    if (params.external_roles && !params.external_roles->empty())
+    {
+        current_roles.insert(current_roles.end(), params.external_roles->begin(), params.external_roles->end());
+        auto new_granted_with_admin_option = user->granted_roles.findGrantedWithAdminOption(*params.external_roles);
+        current_roles_with_admin_option.insert(current_roles_with_admin_option.end(), new_granted_with_admin_option.begin(), new_granted_with_admin_option.end());
+    }
+
    subscription_for_roles_changes.reset();
    enabled_roles = access_control->getEnabledRoles(current_roles, current_roles_with_admin_option);
    subscription_for_roles_changes = enabled_roles->subscribeForChanges([weak_ptr = weak_from_this()](const std::shared_ptr<const EnabledRolesInfo> & roles_info_)
@@ -516,7 +523,6 @@ std::optional<QuotaUsage> ContextAccess::getQuotaUsage() const
    return getQuota()->getUsage();
}

-
SettingsChanges ContextAccess::getDefaultSettings() const
{
    std::lock_guard lock{mutex};
@@ -18,6 +18,7 @@ ContextAccessParams::ContextAccessParams(
    bool full_access_,
    bool use_default_roles_,
    const std::shared_ptr<const std::vector<UUID>> & current_roles_,
+    const std::shared_ptr<const std::vector<UUID>> & external_roles_,
    const Settings & settings_,
    const String & current_database_,
    const ClientInfo & client_info_)
@@ -25,6 +26,7 @@ ContextAccessParams::ContextAccessParams(
    , full_access(full_access_)
    , use_default_roles(use_default_roles_)
    , current_roles(current_roles_)
+    , external_roles(external_roles_)
    , readonly(settings_[Setting::readonly])
    , allow_ddl(settings_[Setting::allow_ddl])
    , allow_introspection(settings_[Setting::allow_introspection_functions])
@@ -59,6 +61,17 @@ String ContextAccessParams::toString() const
        }
        out << "]";
    }
+    if (external_roles && !external_roles->empty())
+    {
+        out << separator() << "external_roles = [";
+        for (size_t i = 0; i != external_roles->size(); ++i)
+        {
+            if (i)
+                out << ", ";
+            out << (*external_roles)[i];
+        }
+        out << "]";
+    }
    if (readonly)
        out << separator() << "readonly = " << readonly;
    if (allow_ddl)
@@ -107,6 +120,7 @@ bool operator ==(const ContextAccessParams & left, const ContextAccessParams & r
    CONTEXT_ACCESS_PARAMS_EQUALS(full_access)
    CONTEXT_ACCESS_PARAMS_EQUALS(use_default_roles)
    CONTEXT_ACCESS_PARAMS_EQUALS(current_roles)
+    CONTEXT_ACCESS_PARAMS_EQUALS(external_roles)
    CONTEXT_ACCESS_PARAMS_EQUALS(readonly)
    CONTEXT_ACCESS_PARAMS_EQUALS(allow_ddl)
    CONTEXT_ACCESS_PARAMS_EQUALS(allow_introspection)
@@ -157,6 +171,7 @@ bool operator <(const ContextAccessParams & left, const ContextAccessParams & ri
    CONTEXT_ACCESS_PARAMS_LESS(full_access)
    CONTEXT_ACCESS_PARAMS_LESS(use_default_roles)
    CONTEXT_ACCESS_PARAMS_LESS(current_roles)
+    CONTEXT_ACCESS_PARAMS_LESS(external_roles)
    CONTEXT_ACCESS_PARAMS_LESS(readonly)
    CONTEXT_ACCESS_PARAMS_LESS(allow_ddl)
    CONTEXT_ACCESS_PARAMS_LESS(allow_introspection)
@@ -19,6 +19,7 @@ public:
    bool full_access_,
    bool use_default_roles_,
    const std::shared_ptr<const std::vector<UUID>> & current_roles_,
+    const std::shared_ptr<const std::vector<UUID>> & external_roles_,
    const Settings & settings_,
    const String & current_database_,
    const ClientInfo & client_info_);
@@ -31,6 +32,7 @@ public:

    const bool use_default_roles;
    const std::shared_ptr<const std::vector<UUID>> current_roles;
+    const std::shared_ptr<const std::vector<UUID>> external_roles;

    const UInt64 readonly;
    const bool allow_ddl;
@@ -26,7 +26,6 @@ namespace ErrorCodes
    extern const int BAD_ARGUMENTS;
}

-
LDAPAccessStorage::LDAPAccessStorage(const String & storage_name_, AccessControl & access_control_, const Poco::Util::AbstractConfiguration & config, const String & prefix)
    : IAccessStorage(storage_name_), access_control(access_control_), memory_storage(storage_name_, access_control.getChangesNotifier(), false)
{
@@ -320,6 +319,10 @@ std::set<String> LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea
{
    std::set<String> role_names;

+    // If this node can't access the LDAP server (or has no privileges to fetch roles) and gets an empty list of external roles
+    if (external_roles.empty())
+        return role_names;
+
    if (external_roles.size() != role_search_params.size())
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unable to map external roles");
@@ -1121,6 +1121,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
        &client_context->getSettingsRef(),
        &client_context->getClientInfo(),
        true,
+        {},
        [&](const Progress & progress) { onProgress(progress); });

    if (send_external_tables)
@@ -1624,6 +1625,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
        &client_context->getSettingsRef(),
        &client_context->getClientInfo(),
        true,
+        {},
        [&](const Progress & progress) { onProgress(progress); });

    if (send_external_tables)
@@ -14,6 +14,7 @@
#include <Client/ClientBase.h>
#include <Client/Connection.h>
#include <Client/ConnectionParameters.h>
#include "Common/logger_useful.h"
#include <Common/ClickHouseRevision.h>
#include <Common/Exception.h>
#include <Common/NetException.h>
@@ -22,8 +23,8 @@
#include <Common/StringUtils.h>
#include <Common/OpenSSLHelpers.h>
#include <Common/randomSeed.h>
#include <Common/logger_useful.h>
#include <Core/Block.h>
#include <Core/ProtocolDefines.h>
#include <Interpreters/ClientInfo.h>
#include <Interpreters/OpenTelemetrySpanLog.h>
#include <Compression/CompressionFactory.h>
@@ -752,6 +753,7 @@ void Connection::sendQuery(
    const Settings * settings,
    const ClientInfo * client_info,
    bool with_pending_data,
+    const std::vector<String> & external_roles,
    std::function<void(const Progress &)>)
{
    OpenTelemetry::SpanHolder span("Connection::sendQuery()", OpenTelemetry::SpanKind::CLIENT);
@@ -824,6 +826,18 @@ void Connection::sendQuery(
    else
        writeStringBinary("" /* empty string is a marker of the end of settings */, *out);

+    String external_roles_str;
+    if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INTERSERVER_EXTERNALLY_GRANTED_ROLES)
+    {
+        WriteBufferFromString buffer(external_roles_str);
+        writeVectorBinary(external_roles, buffer);
+        buffer.finalize();
+
+        LOG_TRACE(log_wrapper.get(), "Sending external_roles with query: [{}] ({})", fmt::join(external_roles, ", "), external_roles.size());
+
+        writeStringBinary(external_roles_str, *out);
+    }
+
    /// Interserver secret
    if (server_revision >= DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET)
    {
@@ -844,6 +858,9 @@ void Connection::sendQuery(
        data += query;
        data += query_id;
        data += client_info->initial_user;
+        // Also for backwards compatibility
+        if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_INTERSERVER_EXTERNALLY_GRANTED_ROLES)
+            data += external_roles_str;
        /// TODO: add source/target host/ip-address

        std::string hash = encodeSHA256(data);
@@ -108,6 +108,7 @@ public:
        const Settings * settings/* = nullptr */,
        const ClientInfo * client_info/* = nullptr */,
        bool with_pending_data/* = false */,
+        const std::vector<String> & external_roles,
        std::function<void(const Progress &)> process_progress_callback) override;

    void sendCancel() override;
@@ -161,7 +161,8 @@ void HedgedConnections::sendQuery(
    const String & query_id,
    UInt64 stage,
    ClientInfo & client_info,
-    bool with_pending_data)
+    bool with_pending_data,
+    const std::vector<String> & external_roles)
{
    std::lock_guard lock(cancel_mutex);

@@ -188,7 +189,7 @@ void HedgedConnections::sendQuery(
        hedged_connections_factory.skipReplicasWithTwoLevelAggregationIncompatibility();
    }

-    auto send_query = [this, timeouts, query, query_id, stage, client_info, with_pending_data](ReplicaState & replica)
+    auto send_query = [this, timeouts, query, query_id, stage, client_info, with_pending_data, external_roles](ReplicaState & replica)
    {
        Settings modified_settings = settings;

@@ -218,7 +219,8 @@ void HedgedConnections::sendQuery(
        modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings[Setting::allow_experimental_analyzer]));

        replica.connection->sendQuery(
-            timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
+            timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, external_roles, {});

        replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout);
        replica.packet_receiver->setTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
    };
@@ -90,7 +90,8 @@ public:
        const String & query_id,
        UInt64 stage,
        ClientInfo & client_info,
-        bool with_pending_data) override;
+        bool with_pending_data,
+        const std::vector<String> & external_roles) override;

    void sendReadTaskResponse(const String &) override
    {
@@ -23,7 +23,8 @@ public:
        const String & query_id,
        UInt64 stage,
        ClientInfo & client_info,
-        bool with_pending_data) = 0;
+        bool with_pending_data,
+        const std::vector<String> & external_roles) = 0;

    virtual void sendReadTaskResponse(const String &) = 0;
    virtual void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) = 0;
@@ -100,6 +100,7 @@ public:
        const Settings * settings,
        const ClientInfo * client_info,
        bool with_pending_data,
+        const std::vector<String> & external_roles,
        std::function<void(const Progress &)> process_progress_callback) = 0;

    virtual void sendCancel() = 0;
@@ -106,6 +106,7 @@ void LocalConnection::sendQuery(
    const Settings *,
    const ClientInfo * client_info,
    bool,
+    const std::vector<String> & /*external_roles*/,
    std::function<void(const Progress &)> process_progress_callback)
{
    /// Last query may not have been finished or cancelled due to exception on client side.
@@ -114,6 +114,7 @@ public:
        const Settings * settings/* = nullptr */,
        const ClientInfo * client_info/* = nullptr */,
        bool with_pending_data/* = false */,
+        const std::vector<String> & external_roles,
        std::function<void(const Progress &)> process_progress_callback) override;

    void sendCancel() override;
@@ -128,7 +128,8 @@ void MultiplexedConnections::sendQuery(
    const String & query_id,
    UInt64 stage,
    ClientInfo & client_info,
-    bool with_pending_data)
+    bool with_pending_data,
+    const std::vector<String> & external_roles)
{
    std::lock_guard lock(cancel_mutex);

@@ -181,14 +182,14 @@ void MultiplexedConnections::sendQuery(
            modified_settings[Setting::parallel_replica_offset] = i;

            replica_states[i].connection->sendQuery(
-                timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
+                timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, external_roles, {});
        }
    }
    else
    {
        /// Use single replica.
        replica_states[0].connection->sendQuery(
-            timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
+            timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, external_roles, {});
    }

    sent_query = true;
@@ -36,7 +36,8 @@ public:
        const String & query_id,
        UInt64 stage,
        ClientInfo & client_info,
-        bool with_pending_data) override;
+        bool with_pending_data,
+        const std::vector<String> & external_roles) override;

    void sendReadTaskResponse(const String &) override;
    void sendMergeTreeReadTaskResponse(const ParallelReadResponse & response) override;
@@ -163,7 +163,7 @@ void Suggest::load(IServerConnection & connection,
void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query, const ClientInfo & client_info)
{
    connection.sendQuery(
-        timeouts, query, {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, nullptr, &client_info, false, {});
+        timeouts, query, {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, nullptr, &client_info, false, {} /* external_roles */, {});

    while (true)
    {
@@ -75,6 +75,8 @@ static struct InitFiu
    PAUSEABLE(stop_moving_part_before_swap_with_active) \
    REGULAR(slowdown_index_analysis) \
    REGULAR(replicated_merge_tree_all_replicas_stale) \
+    REGULAR(zero_copy_lock_zk_fail_before_op) \
+    REGULAR(zero_copy_lock_zk_fail_after_op) \


namespace FailPoints
@@ -90,6 +90,9 @@ static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54470;

static constexpr auto DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCOL = 54471;

+/// Push externally granted roles to other nodes
+static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_INTERSERVER_EXTERNALLY_GRANTED_ROLES = 54472;
+
/// Version of ClickHouse TCP protocol.
///
/// Should be incremented manually on protocol changes.
@@ -97,6 +100,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_VERSIONED_PARALLEL_REPLICAS_PROTOCO
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION,
/// later is just a number for server version (one number instead of commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
-static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54471;
+static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54472;

}
@@ -5734,6 +5734,9 @@ If enabled, MongoDB tables will return an error when a MongoDB query cannot be b
Allow writing simple SELECT queries without the leading SELECT keyword, which makes it simple for calculator-style usage, e.g. `1 + 2` becomes a valid query.

In `clickhouse-local` it is enabled by default and can be explicitly disabled.
)", 0) \
+    DECLARE(Bool, push_external_roles_in_interserver_queries, true, R"(
+Enable pushing user roles from originator to other nodes while performing a query.
+)", 0) \
    \
    \
@@ -88,6 +88,7 @@ static std::initializer_list<std::pair<ClickHouseVersion, SettingsChangesHistory
    {"filesystem_cache_prefer_bigger_buffer_size", true, true, "New setting"},
    {"read_in_order_use_virtual_row", false, false, "Use virtual row while reading in order of primary key or its monotonic function fashion. It is useful when searching over multiple parts as only relevant ones are touched."},
    {"filesystem_cache_boundary_alignment", 0, 0, "New setting"},
+    {"push_external_roles_in_interserver_queries", false, false, "New setting."},
}
},
{"24.10",
@@ -55,8 +55,8 @@ void validateCreateQuery(const ASTCreateQuery & query, ContextPtr context)
        serialized_query.data() + serialized_query.size(),
        "after altering table ",
        0,
-        context->getSettingsRef()[Setting::max_parser_backtracks],
-        context->getSettingsRef()[Setting::max_parser_depth]);
+        context->getSettingsRef()[Setting::max_parser_depth],
+        context->getSettingsRef()[Setting::max_parser_backtracks]);
    const auto & new_query = new_query_raw->as<const ASTCreateQuery &>();
    /// If there are no columns, then there is nothing much we can do
    if (!new_query.columns_list || !new_query.columns_list->columns)
57 src/Interpreters/Access/InterpreterCheckGrantQuery.cpp (new file)

@@ -0,0 +1,57 @@
#include <Interpreters/InterpreterFactory.h>
#include <Interpreters/Access/InterpreterCheckGrantQuery.h>
#include <Interpreters/executeQuery.h>
#include <Parsers/Access/ASTCheckGrantQuery.h>
#include <Parsers/Access/ASTRolesOrUsersSet.h>
#include <Access/AccessControl.h>
#include <Access/ContextAccess.h>
#include <Access/Role.h>
#include <Access/RolesOrUsersSet.h>
#include <Access/User.h>
#include <Interpreters/Context.h>
#include <Interpreters/removeOnClusterClauseIfNeeded.h>
#include <Interpreters/QueryLog.h>
#include <Interpreters/executeDDLQueryOnCluster.h>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm/set_algorithm.hpp>
#include <boost/range/algorithm_ext/erase.hpp>
#include <Interpreters/DatabaseCatalog.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypesNumber.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include "Storages/IStorage.h"

namespace DB
{

BlockIO InterpreterCheckGrantQuery::execute()
{
    auto & query = query_ptr->as<ASTCheckGrantQuery &>();
    query.access_rights_elements.eraseNonGrantable();

    auto current_user_access = getContext()->getAccess();

    /// Collect access rights elements which will be checked.
    AccessRightsElements & elements_to_check_grant = query.access_rights_elements;

    /// Replacing empty database with the default. This step must be done before replication to avoid privilege escalation.
    String current_database = getContext()->getCurrentDatabase();
    elements_to_check_grant.replaceEmptyDatabase(current_database);
    query.access_rights_elements.replaceEmptyDatabase(current_database);
    bool user_is_granted = current_user_access->isGranted(elements_to_check_grant);
    BlockIO res;
    res.pipeline = QueryPipeline(
        std::make_shared<SourceFromSingleChunk>(Block{{ColumnUInt8::create(1, user_is_granted), std::make_shared<DataTypeUInt8>(), "result"}}));
    return res;
}

void registerInterpreterCheckGrantQuery(InterpreterFactory & factory)
{
    auto create_fn = [] (const InterpreterFactory::Arguments & args)
    {
        return std::make_unique<InterpreterCheckGrantQuery>(args.query, args.context);
    };
    factory.registerInterpreter("InterpreterCheckGrantQuery", create_fn);
}

}
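Since the interpreter above builds a single-row block with a `UInt8` column named `result`, a quick usage sketch (the table name `db.users` is hypothetical):

```sql
-- Returns result = 1 if the current user holds SELECT on db.users, else 0
CHECK GRANT SELECT ON db.users;
```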
26 src/Interpreters/Access/InterpreterCheckGrantQuery.h (new file)

@@ -0,0 +1,26 @@
#pragma once

#include <Core/UUID.h>
#include <Interpreters/IInterpreter.h>
#include <Parsers/IAST_fwd.h>


namespace DB
{

class ASTCheckGrantQuery;
struct User;
struct Role;

class InterpreterCheckGrantQuery : public IInterpreter, WithMutableContext
{
public:
    InterpreterCheckGrantQuery(const ASTPtr & query_ptr_, ContextMutablePtr context_) : WithMutableContext(context_), query_ptr(query_ptr_) {}

    BlockIO execute() override;

private:
    ASTPtr query_ptr;
};

}
@@ -129,9 +129,11 @@ ClusterDiscovery::ClusterDiscovery(
        if (!config.has(cluster_config_prefix))
            continue;

-        String zk_root = config.getString(cluster_config_prefix + ".path");
-        if (zk_root.empty())
+        String zk_name_and_root = config.getString(cluster_config_prefix + ".path");
+        if (zk_name_and_root.empty())
            throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "ZooKeeper path for cluster '{}' is empty", key);
+        String zk_root = zkutil::extractZooKeeperPath(zk_name_and_root, true);
+        String zk_name = zkutil::extractZooKeeperName(zk_name_and_root);

        const auto & password = config.getString(cluster_config_prefix + ".password", "");
        const auto & cluster_secret = config.getString(cluster_config_prefix + ".secret", "");
@@ -142,6 +144,7 @@ ClusterDiscovery::ClusterDiscovery(
            key,
            ClusterInfo(
                /* name_= */ key,
+                /* zk_name_= */ zk_name,
                /* zk_root_= */ zk_root,
                /* host_name= */ config.getString(cluster_config_prefix + ".my_hostname", getFQDNOrHostName()),
                /* username= */ config.getString(cluster_config_prefix + ".user", context->getUserName()),
@@ -288,7 +291,7 @@ bool ClusterDiscovery::updateCluster(ClusterInfo & cluster_info)
{
    LOG_DEBUG(log, "Updating cluster '{}'", cluster_info.name);

-    auto zk = context->getZooKeeper();
+    auto zk = context->getDefaultOrAuxiliaryZooKeeper(cluster_info.zk_name);

    int start_version;
    Strings node_uuids = getNodeNames(zk, cluster_info.zk_root, cluster_info.name, &start_version, false);
@@ -381,9 +384,9 @@ void ClusterDiscovery::initialUpdate()
        throw Exception(ErrorCodes::KEEPER_EXCEPTION, "Failpoint cluster_discovery_faults is triggered");
    });

-    auto zk = context->getZooKeeper();
    for (auto & [_, info] : clusters_info)
    {
+        auto zk = context->getDefaultOrAuxiliaryZooKeeper(info.zk_name);
        registerInZk(zk, info);
        if (!updateCluster(info))
        {
@@ -67,6 +67,7 @@ private:
    struct ClusterInfo
    {
        const String name;
+        const String zk_name;
        const String zk_root;
        NodesInfo nodes_info;

@@ -88,6 +89,7 @@ private:
        String cluster_secret;

        ClusterInfo(const String & name_,
+            const String & zk_name_,
            const String & zk_root_,
            const String & host_name,
            const String & username_,
@@ -99,6 +101,7 @@ private:
            bool observer_mode,
            bool invisible)
            : name(name_)
+            , zk_name(zk_name_)
            , zk_root(zk_root_)
            , current_node(host_name + ":" + toString(port), secure, shard_id)
            , current_node_is_observer(observer_mode)
@@ -1493,7 +1493,7 @@ ConfigurationPtr Context::getUsersConfig()
    return shared->users_config;
}

-void Context::setUser(const UUID & user_id_)
+void Context::setUser(const UUID & user_id_, const std::vector<UUID> & external_roles_)
{
    /// Prepare lists of user's profiles, constraints, settings, roles.
    /// NOTE: AccessControl::read<User>() and other AccessControl's functions may require some IO work,
@@ -1508,7 +1508,6 @@ void Context::setUser(const UUID & user_id_)
    const auto & database = user->default_database;

    /// Apply user's profiles, constraints, settings, roles.
-
    std::lock_guard lock(mutex);

    setUserIDWithLock(user_id_, lock);
@@ -1518,6 +1517,7 @@ void Context::setUser(const UUID & user_id_)
    setCurrentProfilesWithLock(*enabled_profiles, /* check_constraints= */ false, lock);

    setCurrentRolesWithLock(default_roles, lock);
+    setExternalRolesWithLock(external_roles_, lock);

    /// It's optional to specify the DEFAULT DATABASE in the user's definition.
    if (!database.empty())
@@ -1561,6 +1561,18 @@ void Context::setCurrentRolesWithLock(const std::vector<UUID> & new_current_role
    need_recalculate_access = true;
}

+void Context::setExternalRolesWithLock(const std::vector<UUID> & new_external_roles, const std::lock_guard<ContextSharedMutex> &)
+{
+    if (!new_external_roles.empty())
+    {
+        if (current_roles)
+            current_roles->insert(current_roles->end(), new_external_roles.begin(), new_external_roles.end());
+        else
+            current_roles = std::make_shared<std::vector<UUID>>(new_external_roles);
+        need_recalculate_access = true;
+    }
+}
+
void Context::setCurrentRolesImpl(const std::vector<UUID> & new_current_roles, bool throw_if_not_granted, bool skip_if_not_granted, const std::shared_ptr<const User> & user)
{
    if (skip_if_not_granted)
@@ -1675,7 +1687,7 @@ std::shared_ptr<const ContextAccessWrapper> Context::getAccess() const
        bool full_access = !user_id;

        return ContextAccessParams{
-            user_id, full_access, /* use_default_roles= */ false, current_roles, *settings, current_database, client_info};
+            user_id, full_access, /* use_default_roles= */ false, current_roles, external_roles, *settings, current_database, client_info};
    };

    /// Check if the current access rights are still valid, otherwise get parameters for recalculating access rights.
@@ -289,6 +289,7 @@ protected:

    std::optional<UUID> user_id;
    std::shared_ptr<std::vector<UUID>> current_roles;
+    std::shared_ptr<std::vector<UUID>> external_roles;
    std::shared_ptr<const SettingsConstraintsAndProfileIDs> settings_constraints_and_current_profiles;
    mutable std::shared_ptr<const ContextAccess> access;
    mutable bool need_recalculate_access = true;
@@ -634,7 +635,7 @@ public:

    /// Sets the current user assuming that he/she is already authenticated.
    /// WARNING: This function doesn't check password!
-    void setUser(const UUID & user_id_);
+    void setUser(const UUID & user_id_, const std::vector<UUID> & external_roles_ = {});
    UserPtr getUser() const;

    std::optional<UUID> getUserID() const;
@@ -1398,6 +1399,8 @@ private:

    void setCurrentRolesWithLock(const std::vector<UUID> & new_current_roles, const std::lock_guard<ContextSharedMutex> & lock);

+    void setExternalRolesWithLock(const std::vector<UUID> & new_external_roles, const std::lock_guard<ContextSharedMutex> & lock);
+
    void setSettingWithLock(std::string_view name, const String & value, const std::lock_guard<ContextSharedMutex> & lock);

    void setSettingWithLock(std::string_view name, const Field & value, const std::lock_guard<ContextSharedMutex> & lock);
@@ -44,6 +44,7 @@
#include <Parsers/Access/ASTCreateUserQuery.h>
#include <Parsers/Access/ASTDropAccessEntityQuery.h>
#include <Parsers/Access/ASTGrantQuery.h>
+#include <Parsers/Access/ASTCheckGrantQuery.h>
#include <Parsers/Access/ASTMoveAccessEntityQuery.h>
#include <Parsers/Access/ASTSetRoleQuery.h>
#include <Parsers/Access/ASTShowAccessEntitiesQuery.h>
@@ -308,6 +309,10 @@ InterpreterFactory::InterpreterPtr InterpreterFactory::get(ASTPtr & query, Conte
    {
        interpreter_name = "InterpreterShowGrantsQuery";
    }
+    else if (query->as<ASTCheckGrantQuery>())
+    {
+        interpreter_name = "InterpreterCheckGrantQuery";
+    }
    else if (query->as<ASTShowAccessEntitiesQuery>())
    {
        interpreter_name = "InterpreterShowAccessEntitiesQuery";
@@ -6,6 +6,8 @@
#include <Access/ContextAccess.h>
#include <Access/SettingsProfilesInfo.h>
#include <Access/User.h>
+#include <Access/Role.h>
#include <Common/typeid_cast.h>
+#include <Common/logger_useful.h>
#include <Common/Exception.h>
#include <Common/ThreadPool.h>
@@ -25,12 +27,12 @@
#include <unordered_map>
#include <vector>


namespace DB
{
namespace Setting
{
    extern const SettingsUInt64 max_sessions_for_user;
+    extern const SettingsBool push_external_roles_in_interserver_queries;
}

namespace ErrorCodes
@@ -288,7 +290,7 @@ void Session::shutdownNamedSessions()
Session::Session(const ContextPtr & global_context_, ClientInfo::Interface interface_, bool is_secure, const std::string & certificate)
    : auth_id(UUIDHelpers::generateV4()),
      global_context(global_context_),
-      log(getLogger(String{magic_enum::enum_name(interface_)} + "-Session"))
+      log(getLogger(String{magic_enum::enum_name(interface_)} + "-Session-" + toString(auth_id)))
{
    prepared_client_info.emplace();
    prepared_client_info->interface = interface_;
@@ -342,12 +344,12 @@ std::unordered_set<AuthenticationType> Session::getAuthenticationTypesOrLogInFai
    }
}

-void Session::authenticate(const String & user_name, const String & password, const Poco::Net::SocketAddress & address)
+void Session::authenticate(const String & user_name, const String & password, const Poco::Net::SocketAddress & address, const Strings & external_roles_)
{
-    authenticate(BasicCredentials{user_name, password}, address);
+    authenticate(BasicCredentials{user_name, password}, address, external_roles_);
}

-void Session::authenticate(const Credentials & credentials_, const Poco::Net::SocketAddress & address_)
+void Session::authenticate(const Credentials & credentials_, const Poco::Net::SocketAddress & address_, const Strings & external_roles_)
{
    if (session_context)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "If there is a session context it must be created after authentication");
@@ -359,8 +361,8 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So
    if ((address == Poco::Net::SocketAddress{}) && (prepared_client_info->interface == ClientInfo::Interface::LOCAL))
        address = Poco::Net::SocketAddress{"127.0.0.1", 0};

-    LOG_DEBUG(log, "{} Authenticating user '{}' from {}",
-        toString(auth_id), credentials_.getUserName(), address.toString());
+    LOG_DEBUG(log, "Authenticating user '{}' from {}",
+        credentials_.getUserName(), address.toString());

    try
    {
@@ -370,6 +372,14 @@ void Session::authenticate(const Credentials & credentials_, const Poco::Net::So
        settings_from_auth_server = auth_result.settings;
        LOG_DEBUG(log, "{} Authenticated with global context as user {}",
            toString(auth_id), toString(*user_id));
+
+        if (!external_roles_.empty() && global_context->getSettingsRef()[Setting::push_external_roles_in_interserver_queries])
+        {
+            external_roles = global_context->getAccessControl().find<Role>(external_roles_);
+
+            LOG_DEBUG(log, "User {} has external_roles applied: [{}] ({})",
+                toString(*user_id), fmt::join(external_roles_, ", "), external_roles_.size());
+        }
    }
    catch (const Exception & e)
    {
@@ -394,7 +404,7 @@ void Session::checkIfUserIsStillValid()

void Session::onAuthenticationFailure(const std::optional<String> & user_name, const Poco::Net::SocketAddress & address_, const Exception & e)
{
-    LOG_DEBUG(log, "{} Authentication failed with error: {}", toString(auth_id), e.what());
+    LOG_DEBUG(log, "Authentication failed with error: {}", e.what());
    if (auto session_log = getSessionLog())
    {
        /// Add source address to the log
@@ -520,8 +530,8 @@ ContextMutablePtr Session::makeSessionContext()
    if (session_tracker_handle)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Session tracker handle was created before making session");

-    LOG_DEBUG(log, "{} Creating session context with user_id: {}",
-        toString(auth_id), toString(*user_id));
+    LOG_DEBUG(log, "Creating session context with user_id: {}",
+        toString(*user_id));
    /// Make a new session context.
    ContextMutablePtr new_session_context;
    new_session_context = Context::createCopy(global_context);
@@ -532,7 +542,7 @@ ContextMutablePtr Session::makeSessionContext()
    prepared_client_info.reset();

    /// Set user information for the new context: current profiles, roles, access rights.
-    new_session_context->setUser(*user_id);
+    new_session_context->setUser(*user_id, external_roles);

    /// Session context is ready.
    session_context = new_session_context;
@@ -563,8 +573,8 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std:
    if (session_tracker_handle)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Session tracker handle was created before making session");

-    LOG_DEBUG(log, "{} Creating named session context with name: {}, user_id: {}",
-        toString(auth_id), session_name_, toString(*user_id));
+    LOG_DEBUG(log, "Creating named session context with name: {}, user_id: {}",
+        session_name_, toString(*user_id));

    /// Make a new session context OR
    /// if the `session_id` and `user_id` were used before then just get a previously created session context.
@@ -587,7 +597,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std:
    /// Set user information for the new context: current profiles, roles, access rights.
    if (!access->tryGetUser())
    {
-        new_session_context->setUser(*user_id);
+        new_session_context->setUser(*user_id, external_roles);
        max_sessions_for_user = new_session_context->getSettingsRef()[Setting::max_sessions_for_user];
    }
    else
@@ -639,7 +649,7 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Query context must be created after authentication");

    /// We can create a query context either from a session context or from a global context.
-    bool from_session_context = static_cast<bool>(session_context);
+    const bool from_session_context = static_cast<bool>(session_context);

    /// Create a new query context.
    ContextMutablePtr query_context = Context::createCopy(from_session_context ? session_context : global_context);
@@ -679,7 +689,7 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t

    /// Set user information for the new context: current profiles, roles, access rights.
    if (user_id && !query_context->getAccess()->tryGetUser())
-        query_context->setUser(*user_id);
+        query_context->setUser(*user_id, external_roles);

    /// Query context is ready.
    query_context_created = true;
@@ -10,6 +10,7 @@
#include <memory>
#include <mutex>
#include <optional>
+#include <vector>

namespace Poco::Net { class SocketAddress; }

@@ -50,8 +51,11 @@ public:

    /// Sets the current user, checks the credentials and that the specified address is allowed to connect from.
    /// The function throws an exception if there is no such user or password is wrong.
-    void authenticate(const String & user_name, const String & password, const Poco::Net::SocketAddress & address);
-    void authenticate(const Credentials & credentials_, const Poco::Net::SocketAddress & address_);
+    void authenticate(const String & user_name, const String & password, const Poco::Net::SocketAddress & address, const Strings & external_roles_ = {});
+
+    /// `external_roles_` names of the additional roles (over what is granted via local access control mechanisms) that would be granted to user during this session.
+    /// Role is not granted if it can't be found by name via AccessControl (i.e. doesn't exist on this instance).
+    void authenticate(const Credentials & credentials_, const Poco::Net::SocketAddress & address_, const Strings & external_roles_ = {});

    // Verifies whether the user's validity extends beyond the current time.
    // Throws an exception if the user's validity has expired.
@@ -112,6 +116,7 @@ private:

    mutable UserPtr user;
    std::optional<UUID> user_id;
+    std::vector<UUID> external_roles;
    AuthenticationData user_authenticated_with;

    ContextMutablePtr session_context;
@@ -45,6 +45,7 @@ void registerInterpreterDropNamedCollectionQuery(InterpreterFactory & factory);
void registerInterpreterGrantQuery(InterpreterFactory & factory);
void registerInterpreterShowCreateAccessEntityQuery(InterpreterFactory & factory);
void registerInterpreterShowGrantsQuery(InterpreterFactory & factory);
+void registerInterpreterCheckGrantQuery(InterpreterFactory & factory);
void registerInterpreterShowAccessEntitiesQuery(InterpreterFactory & factory);
void registerInterpreterShowAccessQuery(InterpreterFactory & factory);
void registerInterpreterShowPrivilegesQuery(InterpreterFactory & factory);
@@ -108,6 +109,7 @@ void registerInterpreters()
    registerInterpreterGrantQuery(factory);
    registerInterpreterShowCreateAccessEntityQuery(factory);
    registerInterpreterShowGrantsQuery(factory);
+    registerInterpreterCheckGrantQuery(factory);
    registerInterpreterShowAccessEntitiesQuery(factory);
    registerInterpreterShowAccessQuery(factory);
    registerInterpreterShowPrivilegesQuery(factory);
@@ -70,8 +70,12 @@ ASTPtr ASTAlterCommand::clone() const

void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
{
+    scope_guard closing_bracket_guard;
    if (format_alter_commands_with_parentheses)
+    {
        settings.ostr << "(";
+        closing_bracket_guard = make_scope_guard(std::function<void(void)>([&settings]() { settings.ostr << ")"; }));
+    }

    if (type == ASTAlterCommand::ADD_COLUMN)
    {
@@ -498,9 +502,6 @@ void ASTAlterCommand::formatImpl(const FormatSettings & settings, FormatState &
    }
    else
        throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Unexpected type of ALTER");
-
-    if (format_alter_commands_with_parentheses)
-        settings.ostr << ")";
}

void ASTAlterCommand::forEachPointerToChild(std::function<void(void**)> f)
126 src/Parsers/Access/ASTCheckGrantQuery.cpp (new file)

@@ -0,0 +1,126 @@
#include <Parsers/Access/ASTCheckGrantQuery.h>
#include <Parsers/Access/ASTRolesOrUsersSet.h>
#include <Common/quoteString.h>
#include <IO/Operators.h>


namespace DB
{

namespace
{
    void formatColumnNames(const Strings & columns, const IAST::FormatSettings & settings)
    {
        settings.ostr << "(";
        bool need_comma = false;
        for (const auto & column : columns)
        {
            if (std::exchange(need_comma, true))
                settings.ostr << ", ";
            settings.ostr << backQuoteIfNeed(column);
        }
        settings.ostr << ")";
    }


    void formatONClause(const AccessRightsElement & element, const IAST::FormatSettings & settings)
    {
        settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "ON " << (settings.hilite ? IAST::hilite_none : "");
        if (element.isGlobalWithParameter())
        {
            if (element.anyParameter())
                settings.ostr << "*";
            else
                settings.ostr << backQuoteIfNeed(element.parameter);
        }
        else if (element.anyDatabase())
        {
            settings.ostr << "*.*";
        }
        else
        {
            if (!element.database.empty())
                settings.ostr << backQuoteIfNeed(element.database) << ".";
            if (element.anyDatabase())
                settings.ostr << "*";
            else
                settings.ostr << backQuoteIfNeed(element.table);
        }
    }


    void formatElementsWithoutOptions(const AccessRightsElements & elements, const IAST::FormatSettings & settings)
    {
        bool no_output = true;
        for (size_t i = 0; i != elements.size(); ++i)
        {
            const auto & element = elements[i];
            auto keywords = element.access_flags.toKeywords();
            if (keywords.empty() || (!element.anyColumn() && element.columns.empty()))
                continue;

            for (const auto & keyword : keywords)
            {
                if (!std::exchange(no_output, false))
                    settings.ostr << ", ";

                settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << keyword << (settings.hilite ? IAST::hilite_none : "");
                if (!element.anyColumn())
                    formatColumnNames(element.columns, settings);
            }

            bool next_element_on_same_db_and_table = false;
            if (i != elements.size() - 1)
            {
                const auto & next_element = elements[i + 1];
                if (element.sameDatabaseAndTableAndParameter(next_element))
                {
                    next_element_on_same_db_and_table = true;
                }
            }

            if (!next_element_on_same_db_and_table)
            {
                settings.ostr << " ";
                formatONClause(element, settings);
            }
        }

        if (no_output)
            settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "USAGE ON " << (settings.hilite ? IAST::hilite_none : "") << "*.*";
    }

}


String ASTCheckGrantQuery::getID(char) const
{
    return "CheckGrantQuery";
}


ASTPtr ASTCheckGrantQuery::clone() const
{
    auto res = std::make_shared<ASTCheckGrantQuery>(*this);

    return res;
}


void ASTCheckGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
{
    settings.ostr << (settings.hilite ? IAST::hilite_keyword : "") << "CHECK GRANT"
                  << (settings.hilite ? IAST::hilite_none : "");

    settings.ostr << " ";

    formatElementsWithoutOptions(access_rights_elements, settings);
}


void ASTCheckGrantQuery::replaceEmptyDatabase(const String & current_database)
{
    access_rights_elements.replaceEmptyDatabase(current_database);
}

}
27 src/Parsers/Access/ASTCheckGrantQuery.h (new file)

@@ -0,0 +1,27 @@
#pragma once

#include <Parsers/IAST.h>
#include <Access/Common/AccessRightsElement.h>
#include <Parsers/ASTQueryWithOnCluster.h>


namespace DB
{
class ASTRolesOrUsersSet;


/** Parses queries like
  * CHECK GRANT access_type[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}
  */
class ASTCheckGrantQuery : public IAST
{
public:
    AccessRightsElements access_rights_elements;

    String getID(char) const override;
    ASTPtr clone() const override;
    void formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override;
    void replaceEmptyDatabase(const String & current_database);
    QueryKind getQueryKind() const override { return QueryKind::Check; }
};
}
225
src/Parsers/Access/ParserCheckGrantQuery.cpp
Normal file
225
src/Parsers/Access/ParserCheckGrantQuery.cpp
Normal file
@ -0,0 +1,225 @@
|
||||
#include <Parsers/ASTIdentifier_fwd.h>
|
||||
#include <Parsers/ASTLiteral.h>
|
||||
#include <Parsers/Access/ASTCheckGrantQuery.h>
|
||||
#include <Parsers/Access/ASTRolesOrUsersSet.h>
|
||||
#include <Parsers/Access/ParserCheckGrantQuery.h>
#include <Parsers/Access/ParserRolesOrUsersSet.h>
#include <Parsers/ExpressionElementParsers.h>
#include <Parsers/ExpressionListParsers.h>
#include <Parsers/parseDatabaseAndTableName.h>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/range/algorithm_ext/erase.hpp>


namespace DB
{
namespace ErrorCodes
{
    extern const int INVALID_GRANT;
}

namespace
{
    bool parseAccessFlags(IParser::Pos & pos, Expected & expected, AccessFlags & access_flags)
    {
        static constexpr auto is_one_of_access_type_words = [](IParser::Pos & pos_)
        {
            if (pos_->type != TokenType::BareWord)
                return false;
            std::string_view word{pos_->begin, pos_->size()};
            return !(boost::iequals(word, toStringView(Keyword::ON)) || boost::iequals(word, toStringView(Keyword::TO)) || boost::iequals(word, toStringView(Keyword::FROM)));
        };

        expected.add(pos, "access type");

        return IParserBase::wrapParseImpl(pos, [&]
        {
            if (!is_one_of_access_type_words(pos))
                return false;

            String str;
            do
            {
                if (!str.empty())
                    str += " ";
                str += std::string_view(pos->begin, pos->size());
                ++pos;
            }
            while (is_one_of_access_type_words(pos));

            try
            {
                access_flags = AccessFlags{str};
            }
            catch (...)
            {
                return false;
            }

            return true;
        });
    }


    bool parseColumnNames(IParser::Pos & pos, Expected & expected, Strings & columns)
    {
        return IParserBase::wrapParseImpl(pos, [&]
        {
            if (!ParserToken{TokenType::OpeningRoundBracket}.ignore(pos, expected))
                return false;

            ASTPtr ast;
            if (!ParserList{std::make_unique<ParserIdentifier>(), std::make_unique<ParserToken>(TokenType::Comma), false}.parse(pos, ast, expected))
                return false;

            Strings res_columns;
            for (const auto & child : ast->children)
                res_columns.emplace_back(getIdentifierName(child));

            if (!ParserToken{TokenType::ClosingRoundBracket}.ignore(pos, expected))
                return false;

            columns = std::move(res_columns);
            return true;
        });
    }

    bool parseAccessFlagsWithColumns(IParser::Pos & pos, Expected & expected,
        std::vector<std::pair<AccessFlags, Strings>> & access_and_columns)
    {
        std::vector<std::pair<AccessFlags, Strings>> res;

        auto parse_access_and_columns = [&]
        {
            AccessFlags access_flags;
            if (!parseAccessFlags(pos, expected, access_flags))
                return false;

            Strings columns;
            parseColumnNames(pos, expected, columns);
            res.emplace_back(access_flags, std::move(columns));
            return true;
        };

        if (!ParserList::parseUtil(pos, expected, parse_access_and_columns, false))
            return false;

        access_and_columns = std::move(res);
        return true;
    }


    bool parseElements(IParser::Pos & pos, Expected & expected, AccessRightsElements & elements)
    {
        return IParserBase::wrapParseImpl(pos, [&]
        {
            AccessRightsElements res_elements;

            auto parse_around_on = [&]
            {
                std::vector<std::pair<AccessFlags, Strings>> access_and_columns;
                if (!parseAccessFlagsWithColumns(pos, expected, access_and_columns))
                    return false;

                String database_name, table_name, parameter;

                size_t is_global_with_parameter = 0;
                for (const auto & elem : access_and_columns)
                {
                    if (elem.first.isGlobalWithParameter())
                        ++is_global_with_parameter;
                }

                if (!ParserKeyword{Keyword::ON}.ignore(pos, expected))
                    return false;

                bool wildcard = false;
                bool default_database = false;
                if (is_global_with_parameter && is_global_with_parameter == access_and_columns.size())
                {
                    ASTPtr parameter_ast;
                    if (!ParserToken{TokenType::Asterisk}.ignore(pos, expected))
                    {
                        if (ParserIdentifier{}.parse(pos, parameter_ast, expected))
                            parameter = getIdentifierName(parameter_ast);
                        else
                            return false;
                    }

                    if (ParserToken{TokenType::Asterisk}.ignore(pos, expected))
                        wildcard = true;
                }
                else if (!parseDatabaseAndTableNameOrAsterisks(pos, expected, database_name, table_name, wildcard, default_database))
                    return false;

                for (auto & [access_flags, columns] : access_and_columns)
                {
                    AccessRightsElement element;
                    element.access_flags = access_flags;
                    element.columns = std::move(columns);
                    element.database = database_name;
                    element.table = table_name;
                    element.parameter = parameter;
                    element.wildcard = wildcard;
                    element.default_database = default_database;
                    res_elements.emplace_back(std::move(element));
                }

                return true;
            };

            if (!ParserList::parseUtil(pos, expected, parse_around_on, false))
                return false;

            elements = std::move(res_elements);
            return true;
        });
    }

    void throwIfNotGrantable(AccessRightsElements & elements)
    {
        std::erase_if(elements, [](AccessRightsElement & element)
        {
            if (element.empty())
                return true;
            auto old_flags = element.access_flags;
            element.eraseNonGrantable();
            if (!element.empty())
                return false;

            if (!element.anyColumn())
                throw Exception(ErrorCodes::INVALID_GRANT, "{} cannot check grant on the column level", old_flags.toString());
            else if (!element.anyTable())
                throw Exception(ErrorCodes::INVALID_GRANT, "{} cannot check grant on the table level", old_flags.toString());
            else if (!element.anyDatabase())
                throw Exception(ErrorCodes::INVALID_GRANT, "{} cannot check grant on the database level", old_flags.toString());
            else if (!element.anyParameter())
                throw Exception(ErrorCodes::INVALID_GRANT, "{} cannot check grant on the global with parameter level", old_flags.toString());
            else
                throw Exception(ErrorCodes::INVALID_GRANT, "{} cannot check grant", old_flags.toString());
        });
    }
}


bool ParserCheckGrantQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
    if (!ParserKeyword{Keyword::CHECK_GRANT}.ignore(pos, expected))
        return false;


    AccessRightsElements elements;

    if (!parseElements(pos, expected, elements))
        return false;

    throwIfNotGrantable(elements);

    auto query = std::make_shared<ASTCheckGrantQuery>();
    node = query;

    query->access_rights_elements = std::move(elements);

    return true;
}
}
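A note for readers skimming the parser above: parseAccessFlags greedily concatenates consecutive bare words into one phrase ("SELECT", "ALTER UPDATE", ...) and only then asks AccessFlags to validate it, backtracking on failure. A minimal Python sketch of that word-collection loop, with a hypothetical token list and a stub validator standing in for the real token stream and the AccessFlags{str} constructor:

    # Standalone sketch of the bare-word accumulation in parseAccessFlags.
    # STOP_WORDS mirrors the ON/TO/FROM check; KNOWN_ACCESS_TYPES is a stand-in
    # for the validation done by constructing AccessFlags from the phrase.
    STOP_WORDS = {"ON", "TO", "FROM"}
    KNOWN_ACCESS_TYPES = {"SELECT", "INSERT", "ALTER UPDATE"}

    def parse_access_flags(tokens, pos):
        """Collect bare words until ON/TO/FROM, then validate the whole phrase."""
        words = []
        while pos < len(tokens) and tokens[pos].upper() not in STOP_WORDS:
            words.append(tokens[pos])
            pos += 1
        phrase = " ".join(words)
        if phrase.upper() not in KNOWN_ACCESS_TYPES:
            return None, pos  # the C++ code backtracks by returning false here
        return phrase, pos

    print(parse_access_flags(["ALTER", "UPDATE", "ON", "db", ".", "tb"], 0))
    # -> ('ALTER UPDATE', 2)

Validating only the joined phrase is what lets multi-word access types parse without a dedicated keyword table in the parser itself.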
17
src/Parsers/Access/ParserCheckGrantQuery.h
Normal file
@ -0,0 +1,17 @@
#pragma once

#include <Parsers/IParserBase.h>


namespace DB
{
/** Parses queries like
  * CHECK GRANT access_type[(column_name [,...])] [,...] ON {db.table|db.*|*.*|table|*}
  */
class ParserCheckGrantQuery : public IParserBase
{
protected:
    const char * getName() const override { return "CHECK GRANT"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};
}
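As the stateless test later in this commit shows, CHECK GRANT returns 1 or 0. A quick way to exercise the new parser end to end is the HTTP interface; a hedged sketch using only the Python standard library (the localhost address and default credentials are assumptions for a locally running server):

    # Hypothetical local server; CHECK GRANT returns "1\n" or "0\n" over HTTP.
    import urllib.request

    def check_grant(query, user="default", password=""):
        req = urllib.request.Request(
            "http://localhost:8123/",
            data=query.encode(),
            headers={"X-ClickHouse-User": user, "X-ClickHouse-Key": password},
        )
        with urllib.request.urlopen(req) as resp:
            return resp.read().decode().strip() == "1"

    print(check_grant("CHECK GRANT SELECT ON default.tb"))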
@ -79,6 +79,7 @@ namespace DB
    MR_MACROS(CHARACTER, "CHARACTER") \
    MR_MACROS(CHECK_ALL_TABLES, "CHECK ALL TABLES") \
    MR_MACROS(CHECK_TABLE, "CHECK TABLE") \
    MR_MACROS(CHECK_GRANT, "CHECK GRANT") \
    MR_MACROS(CHECK, "CHECK") \
    MR_MACROS(CLEANUP, "CLEANUP") \
    MR_MACROS(CLEAR_COLUMN, "CLEAR COLUMN") \
@ -140,6 +140,9 @@ bool ParserSubquery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
        const ASTPtr & explained_ast = explain_query.getExplainedQuery();
        if (explained_ast)
        {
            if (!explained_ast->as<ASTSelectWithUnionQuery>())
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "EXPLAIN inside subquery supports only SELECT queries");

            auto view_explain = makeASTFunction("viewExplain",
                std::make_shared<ASTLiteral>(kind_str),
                std::make_shared<ASTLiteral>(settings_str),
@ -31,6 +31,7 @@
#include <Parsers/Access/ParserCreateUserQuery.h>
#include <Parsers/Access/ParserDropAccessEntityQuery.h>
#include <Parsers/Access/ParserGrantQuery.h>
#include <Parsers/Access/ParserCheckGrantQuery.h>
#include <Parsers/Access/ParserMoveAccessEntityQuery.h>
#include <Parsers/Access/ParserSetRoleQuery.h>

@ -67,6 +68,7 @@ bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    ParserDropAccessEntityQuery drop_access_entity_p;
    ParserMoveAccessEntityQuery move_access_entity_p;
    ParserGrantQuery grant_p;
    ParserCheckGrantQuery check_grant_p;
    ParserSetRoleQuery set_role_p;
    ParserExternalDDLQuery external_ddl_p;
    ParserTransactionControl transaction_control_p;
@ -102,6 +104,7 @@ bool ParserQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
        || drop_access_entity_p.parse(pos, node, expected)
        || move_access_entity_p.parse(pos, node, expected)
        || grant_p.parse(pos, node, expected)
        || check_grant_p.parse(pos, node, expected)
        || external_ddl_p.parse(pos, node, expected)
        || transaction_control_p.parse(pos, node, expected)
        || delete_p.parse(pos, node, expected)
@ -56,8 +56,9 @@ RemoteInserter::RemoteInserter(
    /** Send query and receive "header" that describes the table structure.
      * The header is needed to know what structure is required for blocks to be passed to the 'write' method.
      */
    /// TODO (vnemkov): figure out whether we should pass additional roles in this case or not.
    connection.sendQuery(
        timeouts, query, /* query_parameters */ {}, "", QueryProcessingStage::Complete, &settings, &modified_client_info, false, {});
        timeouts, query, /* query_parameters */ {}, "", QueryProcessingStage::Complete, &settings, &modified_client_info, false, /* external_roles */ {}, {});

    while (true)
    {
@ -22,8 +22,12 @@
#include <Client/MultiplexedConnections.h>
#include <Client/HedgedConnections.h>
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>
#include <Storages/StorageMemory.h>
#include <Storages/MergeTree/ParallelReplicasReadingCoordinator.h>
#include <Storages/StorageMemory.h>

#include <Access/AccessControl.h>
#include <Access/User.h>
#include <Access/Role.h>

namespace ProfileEvents
{
@ -43,6 +47,7 @@ namespace Setting
    extern const SettingsBool skip_unavailable_shards;
    extern const SettingsOverflowMode timeout_overflow_mode;
    extern const SettingsBool use_hedged_requests;
    extern const SettingsBool push_external_roles_in_interserver_queries;
}

namespace ErrorCodes
@ -398,7 +403,25 @@ void RemoteQueryExecutor::sendQueryUnlocked(ClientInfo::QueryKind query_kind, As
    if (!duplicated_part_uuids.empty())
        connections->sendIgnoredPartUUIDs(duplicated_part_uuids);

    connections->sendQuery(timeouts, query, query_id, stage, modified_client_info, true);
    // Collect all roles granted on this node and pass those to the remote node
    std::vector<String> local_granted_roles;
    if (context->getSettingsRef()[Setting::push_external_roles_in_interserver_queries] && !modified_client_info.initial_user.empty())
    {
        auto user = context->getAccessControl().read<User>(modified_client_info.initial_user, true);
        boost::container::flat_set<String> granted_roles;
        if (user)
        {
            const auto & access_control = context->getAccessControl();
            for (const auto & e : user->granted_roles.getElements())
            {
                auto names = access_control.readNames(e.ids);
                granted_roles.insert(names.begin(), names.end());
            }
        }
        local_granted_roles.insert(local_granted_roles.end(), granted_roles.begin(), granted_roles.end());
    }

    connections->sendQuery(timeouts, query, query_id, stage, modified_client_info, true, local_granted_roles);

    established = false;
    sent_query = true;
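The role-collection block above boils down to: read the initiating user, resolve its granted role IDs to names, de-duplicate, and attach the list to the outgoing query. The same shape sketched in Python, where the access-control objects are hypothetical stand-ins for the real API:

    # Sketch: resolve and de-duplicate granted role names before forwarding a query.
    def collect_external_roles(access_control, initial_user, push_enabled):
        """Mirror of the C++ logic: empty list unless the setting is on and
        the query carries an initial user."""
        if not push_enabled or not initial_user:
            return []
        user = access_control.get(initial_user)  # hypothetical lookup
        if user is None:
            return []
        granted = set()
        for element in user.granted_roles:
            granted.update(access_control.names(element.ids))  # ids -> names
        return sorted(granted)  # a stable, duplicate-free list for the wire

Using a set (flat_set in the C++ code) matters because the same role can be reachable through several grant elements.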
@ -63,12 +63,17 @@

#include <Core/Protocol.h>
#include <Storages/MergeTree/RequestResponse.h>
#include <Interpreters/ClientInfo.h>

#include "TCPHandler.h"

#include <Common/config_version.h>

#include <fmt/format.h>

#include <fmt/ostream.h>
#include <Common/StringUtils.h>

using namespace std::literals;
using namespace DB;

@ -1960,6 +1965,13 @@ void TCPHandler::processQuery(std::optional<QueryState> & state)
    Settings passed_settings;
    passed_settings.read(*in, settings_format);

    std::string received_extra_roles;
    // TODO: check if having `is_interserver_mode` doesn't break interoperability with the CH-client.
    if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_INTERSERVER_EXTERNALLY_GRANTED_ROLES)
    {
        readStringBinary(received_extra_roles, *in);
    }

    /// Interserver secret.
    std::string received_hash;
    if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET)
@ -2019,6 +2031,7 @@ void TCPHandler::processQuery(std::optional<QueryState> & state)
    data += state->query;
    data += state->query_id;
    data += client_info.initial_user;
    data += received_extra_roles;

    std::string calculated_hash = encodeSHA256(data);
    assert(calculated_hash.size() == 32);
@ -2039,13 +2052,25 @@ void TCPHandler::processQuery(std::optional<QueryState> & state)
    }
    else
    {
        // In a cluster, the query originator may have access to an external auth provider (like an LDAP server)
        // that grants specific roles to the user. We want these roles to be granted to the user on the other
        // nodes of the cluster when the query is executed.
        Strings external_roles;
        if (!received_extra_roles.empty())
        {
            ReadBufferFromString buffer(received_extra_roles);

            readVectorBinary(external_roles, buffer);
            LOG_DEBUG(log, "Parsed extra roles [{}]", fmt::join(external_roles, ", "));
        }

        LOG_DEBUG(log, "User (initial, interserver mode): {} (client: {})", client_info.initial_user, getClientAddress(client_info).toString());
        /// In case of inter-server mode, authorization is done with the
        /// initial address of the client, not the real address from which
        /// the query came, since the real address is the address of
        /// the initiator server, while we are interested in the client's
        /// address.
        session->authenticate(AlwaysAllowCredentials{client_info.initial_user}, client_info.initial_address);
        session->authenticate(AlwaysAllowCredentials{client_info.initial_user}, client_info.initial_address, external_roles);
    }

    is_interserver_authenticated = true;
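Note how the interserver secret check above now covers the forwarded roles: both sides append `received_extra_roles` to the same field sequence before hashing, so tampering with the role blob invalidates the signature. A sketch of that idea (field order assumed from the visible lines; the real `data` is additionally seeded with the cluster secret and salt not shown in this hunk):

    import hashlib

    def interserver_hash(secret, query, query_id, initial_user, extra_roles_blob):
        """Initiator and receiver must concatenate the fields in the same
        order; appending the serialized roles binds them to the signature."""
        data = secret + query + query_id + initial_user + extra_roles_blob
        return hashlib.sha256(data.encode()).digest()  # 32 bytes, as asserted above

    h1 = interserver_hash("s", "SELECT 1", "q-1", "alice", "role_read")
    h2 = interserver_hash("s", "SELECT 1", "q-1", "alice", "")  # tampered roles
    assert h1 != h2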
@ -545,6 +545,14 @@ SerializationPtr IMergeTreeDataPart::tryGetSerialization(const String & column_n
    return it == serializations.end() ? nullptr : it->second;
}

bool IMergeTreeDataPart::isMovingPart() const
{
    fs::path part_directory_path = getDataPartStorage().getRelativePath();
    if (part_directory_path.filename().empty())
        part_directory_path = part_directory_path.parent_path();
    return part_directory_path.parent_path().filename() == "moving";
}

void IMergeTreeDataPart::removeIfNeeded() noexcept
{
    assert(assertHasValidVersionMetadata());
@ -569,10 +577,7 @@ void IMergeTreeDataPart::removeIfNeeded() noexcept
        throw Exception(ErrorCodes::LOGICAL_ERROR, "relative_path {} of part {} is invalid or not set",
            getDataPartStorage().getPartDirectory(), name);

    fs::path part_directory_path = getDataPartStorage().getRelativePath();
    if (part_directory_path.filename().empty())
        part_directory_path = part_directory_path.parent_path();
    bool is_moving_part = part_directory_path.parent_path().filename() == "moving";
    bool is_moving_part = isMovingPart();
    if (!startsWith(file_name, "tmp") && !endsWith(file_name, ".tmp_proj") && !is_moving_part)
    {
        LOG_ERROR(

@ -434,6 +434,9 @@ public:

    bool isProjectionPart() const { return parent_part != nullptr; }

    /// Check if the part is in the `/moving` directory
    bool isMovingPart() const;

    const IMergeTreeDataPart * getParentPart() const { return parent_part; }
    String getParentPartName() const { return parent_part_name; }

|
||||
/// replica will actually move the part from disk to some
|
||||
/// zero-copy storage other replicas will just fetch
|
||||
/// metainformation.
|
||||
if (auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part->name, disk); lock)
|
||||
{
|
||||
if (lock->isLocked())
|
||||
{
|
||||
cloned_part = parts_mover.clonePart(moving_part, read_settings, write_settings);
|
||||
parts_mover.swapClonedPart(cloned_part);
|
||||
break;
|
||||
}
|
||||
if (wait_for_move_if_zero_copy)
|
||||
{
|
||||
LOG_DEBUG(log, "Other replica is working on move of {}, will wait until lock disappear", moving_part.part->name);
|
||||
/// Wait and checks not only for timeout but also for shutdown and so on.
|
||||
while (!waitZeroCopyLockToDisappear(*lock, 3000))
|
||||
{
|
||||
LOG_DEBUG(log, "Waiting until some replica will move {} and zero copy lock disappear", moving_part.part->name);
|
||||
}
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
else
|
||||
auto lock = tryCreateZeroCopyExclusiveLock(moving_part.part->name, disk);
|
||||
if (!lock)
|
||||
{
|
||||
/// Move will be retried but with backoff.
|
||||
LOG_DEBUG(log, "Move of part {} postponed, because zero copy mode enabled and someone other moving this part right now", moving_part.part->name);
|
||||
LOG_DEBUG(
|
||||
log,
|
||||
"Move of part {} postponed, because zero copy mode enabled and zero-copy lock was not acquired",
|
||||
moving_part.part->name);
|
||||
result = MovePartsOutcome::MoveWasPostponedBecauseOfZeroCopy;
|
||||
break;
|
||||
}
|
||||
|
||||
if (lock->isLocked())
|
||||
{
|
||||
cloned_part = parts_mover.clonePart(moving_part, read_settings, write_settings);
|
||||
/// Cloning part can take a long time.
|
||||
/// Recheck if the lock (and keeper session expirity) is OK
|
||||
if (lock->isLocked())
|
||||
{
|
||||
parts_mover.swapClonedPart(cloned_part);
|
||||
break; /// Successfully moved
|
||||
}
|
||||
else
|
||||
{
|
||||
LOG_DEBUG(
|
||||
log,
|
||||
"Move of part {} postponed, because zero copy mode enabled and zero-copy lock was lost during cloning the part",
|
||||
moving_part.part->name);
|
||||
result = MovePartsOutcome::MoveWasPostponedBecauseOfZeroCopy;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (wait_for_move_if_zero_copy)
|
||||
{
|
||||
LOG_DEBUG(log, "Other replica is working on move of {}, will wait until lock disappear", moving_part.part->name);
|
||||
/// Wait and checks not only for timeout but also for shutdown and so on.
|
||||
while (!waitZeroCopyLockToDisappear(*lock, 3000))
|
||||
{
|
||||
LOG_DEBUG(log, "Waiting until some replica will move {} and zero copy lock disappear", moving_part.part->name);
|
||||
}
|
||||
}
|
||||
else
|
||||
break;
|
||||
}
|
||||
}
|
||||
else /// Ordinary move as it should be
|
||||
|
@ -259,6 +259,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
|
||||
|
||||
disk->createDirectories(path_to_clone);
|
||||
|
||||
/// TODO: Make it possible to fetch only zero-copy part without fallback to fetching a full-copy one
|
||||
auto zero_copy_part = data->tryToFetchIfShared(*part, disk, fs::path(path_to_clone) / part->name);
|
||||
|
||||
if (zero_copy_part)
|
||||
@ -301,6 +302,28 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
|
||||
return cloned_part;
|
||||
}
|
||||
|
||||
void MergeTreePartsMover::renameClonedPart(IMergeTreeDataPart & part) const
|
||||
try
|
||||
{
|
||||
part.is_temp = false;
|
||||
/// Mark it DeleteOnDestroy to ensure deleting in destructor
|
||||
/// if something goes wrong before swapping
|
||||
part.setState(MergeTreeDataPartState::DeleteOnDestroy);
|
||||
/// Don't remove new directory but throw an error because it may contain part which is currently in use.
|
||||
part.renameTo(part.name, /* remove_new_dir_if_exists */ false);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
/// Check if part was renamed or not
|
||||
/// `renameTo()` does not provide strong exception guarantee in case of an exception
|
||||
if (part.isMovingPart())
|
||||
{
|
||||
/// Restore its temporary state
|
||||
part.is_temp = true;
|
||||
part.setState(MergeTreeDataPartState::Temporary);
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) const
|
||||
{
|
||||
@ -327,12 +350,23 @@ void MergeTreePartsMover::swapClonedPart(TemporaryClonedPart & cloned_part) cons
|
||||
return;
|
||||
}
|
||||
|
||||
cloned_part.part->is_temp = false;
|
||||
/// It is safe to acquire zero-copy lock for the temporary part here
|
||||
/// because no one can fetch it until it is *swapped*.
|
||||
///
|
||||
/// Set ASK_KEEPER to try to unlock it in destructor if something goes wrong before *renaming*
|
||||
/// If unlocking is failed we will not get a stuck part in moving directory
|
||||
/// because it will be renamed to delete_tmp_<name> beforehand and cleaned up later.
|
||||
/// Worst outcomes: trash in object storage and/or orphaned shared zero-copy lock. It is acceptable.
|
||||
/// See DataPartStorageOnDiskBase::remove().
|
||||
cloned_part.part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::ASK_KEEPER;
|
||||
data->lockSharedData(*cloned_part.part, /* replace_existing_lock = */ true);
|
||||
|
||||
/// Don't remove new directory but throw an error because it may contain part which is currently in use.
|
||||
cloned_part.part->renameTo(active_part->name, false);
|
||||
renameClonedPart(*cloned_part.part);
|
||||
|
||||
/// TODO what happen if server goes down here?
|
||||
/// If server goes down here we will get two copy of the part with the same name on different disks.
|
||||
/// And on the next ClickHouse startup during loading parts the first copy (in the order of defining disks
|
||||
/// in the storage policy) will be loaded as Active, the second one will be loaded as Outdated and removed as duplicate.
|
||||
/// See MergeTreeData::loadDataParts().
|
||||
data->swapActivePart(cloned_part.part, part_lock);
|
||||
|
||||
LOG_TRACE(log, "Part {} was moved to {}", cloned_part.part->name, cloned_part.part->getDataPartStorage().getFullPath());
|
||||
|
@ -75,6 +75,9 @@ public:
|
||||
/// merge or mutation.
|
||||
void swapClonedPart(TemporaryClonedPart & cloned_part) const;
|
||||
|
||||
/// Rename cloned part from `moving/` directory to the actual part storage
|
||||
void renameClonedPart(IMergeTreeDataPart & part) const;
|
||||
|
||||
/// Can stop background moves and moves from queries
|
||||
ActionBlocker moves_blocker;
|
||||
|
||||
|
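renameClonedPart() is a small commit-or-rollback helper: flip the part out of its temporary state, attempt the rename, and restore the temporary state if the rename did not actually happen (renameTo gives no strong exception guarantee). The pattern, sketched in Python with a hypothetical part object:

    # Sketch of the rename-with-rollback pattern used by renameClonedPart().
    def rename_cloned_part(part):
        part.is_temp = False
        part.state = "DeleteOnDestroy"  # ensures cleanup if we fail before the swap
        try:
            part.rename_to(part.name, remove_new_dir_if_exists=False)
        except Exception:
            if part.is_moving():        # rename did not complete: roll back
                part.is_temp = True
                part.state = "Temporary"
            raise

Restoring the temporary state is what lets a later retry of the MOVE reuse or clean up the leftovers instead of failing on them, which the new integration tests below verify.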
@ -47,6 +47,7 @@ String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, Con
    auto query_settings = configuration->getQuerySettings(context);
    /// We don't want to throw an exception if there are no files with the specified path.
    query_settings.throw_on_zero_files_match = false;
    query_settings.ignore_non_existent_file = true;

    bool local_distributed_processing = distributed_processing;
    if (context->getSettingsRef()[Setting::use_hive_partitioning])
@ -64,6 +65,9 @@ String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, Con
        {} // file_progress_callback
    );

    if (!configuration->isArchive() && !configuration->isPathWithGlobs() && !local_distributed_processing)
        return configuration->getPath();

    if (auto file = file_iterator->next(0))
        return file->getPath();
    return "";
@ -216,6 +216,8 @@ namespace FailPoints
    extern const char replicated_queue_fail_next_entry[];
    extern const char replicated_queue_unfail_entries[];
    extern const char finish_set_quorum_failed_parts[];
    extern const char zero_copy_lock_zk_fail_before_op[];
    extern const char zero_copy_lock_zk_fail_after_op[];
}

namespace ErrorCodes
@ -10480,6 +10482,10 @@ void StorageReplicatedMergeTree::createZeroCopyLockNode(
    Coordination::Requests ops;
    Coordination::Responses responses;
    getZeroCopyLockNodeCreateOps(zookeeper, zookeeper_node, ops, mode, replace_existing_lock, path_to_set_hardlinked_files, hardlinked_files);

    fiu_do_on(FailPoints::zero_copy_lock_zk_fail_before_op, { zookeeper->forceFailureBeforeOperation(); });
    fiu_do_on(FailPoints::zero_copy_lock_zk_fail_after_op, { zookeeper->forceFailureAfterOperation(); });

    auto error = zookeeper->tryMulti(ops, responses);
    if (error == Coordination::Error::ZOK)
    {
@ -46,7 +46,13 @@ class CI:
                JobNames.JEPSEN_KEEPER,
                JobNames.JEPSEN_SERVER,
            ]
        )
        ),
        WorkFlowNames.NIGHTLY: LabelConfig(
            run_jobs=[
                BuildNames.FUZZERS,
                JobNames.LIBFUZZER_TEST,
            ]
        ),
    }  # type: Dict[str, LabelConfig]

    TAG_CONFIGS = {

@ -92,6 +92,7 @@ class WorkFlowNames(metaclass=WithIter):

    JEPSEN = "JepsenWorkflow"
    CreateRelease = "CreateRelease"
    NIGHTLY = "NightlyBuilds"


class BuildNames(metaclass=WithIter):
@ -570,6 +571,7 @@ class CommonJobConfigs:
            "tests/ci/docker_server.py",
            "tests/ci/docker_images_helper.py",
            "./docker/server",
            "./docker/keeper",
        ]
    ),
    runner_type=Runners.STYLE_CHECKER,
@ -1,11 +1,6 @@
<clickhouse>
    <allow_experimental_cluster_discovery>1</allow_experimental_cluster_discovery>
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>/clickhouse/discovery/test_auto_cluster</path>
            </discovery>
        </test_auto_cluster>
        <two_shards>
            <!-- just to check that there's no conflict between automatic and manual clusters -->
            <shard>
@ -0,0 +1,9 @@
<clickhouse>
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>/clickhouse/discovery/test_auto_cluster</path>
            </discovery>
        </test_auto_cluster>
    </remote_servers>
</clickhouse>

@ -0,0 +1,9 @@
<clickhouse>
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>zookeeper2:/clickhouse/discovery/test_auto_cluster</path>
            </discovery>
        </test_auto_cluster>
    </remote_servers>
</clickhouse>
@ -0,0 +1,24 @@
<clickhouse>
    <zookeeper>
        <node index="1">
            <host>zoo1</host>
            <port>2181</port>
        </node>
        <node index="2">
            <host>zoo2</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>zoo3</host>
            <port>2181</port>
        </node>
    </zookeeper>
    <auxiliary_zookeepers>
        <zookeeper2>
            <node index="1">
                <host>zoo1</host>
                <port>2181</port>
            </node>
        </zookeeper2>
    </auxiliary_zookeepers>
</clickhouse>
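The discovery path in the second config above carries an auxiliary-keeper prefix (`zookeeper2:/clickhouse/...`), directing cluster discovery to register in the `zookeeper2` ensemble declared under `auxiliary_zookeepers` instead of the default one. A sketch of how such a prefix can be split off; the exact rule (a single colon before an absolute path) is my assumption for illustration:

    def split_discovery_path(path):
        """'zookeeper2:/a/b' -> ('zookeeper2', '/a/b'); '/a/b' -> (None, '/a/b')."""
        prefix, sep, rest = path.partition(":")
        if sep and rest.startswith("/") and not path.startswith("/"):
            return prefix, rest
        return None, path

    assert split_discovery_path("zookeeper2:/clickhouse/discovery/test_auto_cluster") == (
        "zookeeper2", "/clickhouse/discovery/test_auto_cluster")
    assert split_discovery_path("/clickhouse/discovery/test_auto_cluster") == (
        None, "/clickhouse/discovery/test_auto_cluster")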
@ -3,7 +3,6 @@
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>/clickhouse/discovery/test_auto_cluster</path>
                <observer/>
            </discovery>
        </test_auto_cluster>

@ -3,7 +3,6 @@
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>/clickhouse/discovery/test_auto_cluster</path>
                <shard>1</shard>
            </discovery>
        </test_auto_cluster>

@ -3,7 +3,6 @@
    <remote_servers>
        <test_auto_cluster>
            <discovery>
                <path>/clickhouse/discovery/test_auto_cluster</path>
                <shard>3</shard>
            </discovery>
        </test_auto_cluster>
@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard0</shard>
        <replica>replica0</replica>
    </macros>
</clickhouse>

@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard1</shard>
        <replica>replica1</replica>
    </macros>
</clickhouse>

@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard2</shard>
        <replica>replica2</replica>
    </macros>
</clickhouse>

@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard3</shard>
        <replica>replica3</replica>
    </macros>
</clickhouse>

@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard4</shard>
        <replica>replica4</replica>
    </macros>
</clickhouse>

@ -0,0 +1,6 @@
<clickhouse>
    <macros>
        <shard>shard_o</shard>
        <replica>replica_o</replica>
    </macros>
</clickhouse>
@ -20,7 +20,7 @@ shard_configs = {
nodes = {
    node_name: cluster.add_instance(
        node_name,
        main_configs=[shard_config],
        main_configs=[shard_config, "config/config_discovery_path.xml"],
        stay_alive=True,
        with_zookeeper=True,
    )
@ -119,3 +119,6 @@ def test_cluster_discovery_startup_and_stop(start_cluster):
    check_nodes_count(
        [nodes["node1"], nodes["node2"]], 2, cluster_name="two_shards", retries=1
    )

    # cleanup
    nodes["node0"].query("DROP TABLE tbl ON CLUSTER 'test_auto_cluster' SYNC")
@ -0,0 +1,127 @@
import functools

import pytest

from helpers.cluster import ClickHouseCluster

from .common import check_on_cluster

cluster = ClickHouseCluster(__file__)

shard_configs = {
    "node0": ["config/config.xml", "config/macros0.xml"],
    "node1": ["config/config_shard1.xml", "config/macros1.xml"],
    "node2": ["config/config.xml", "config/macros2.xml"],
    "node3": ["config/config_shard3.xml", "config/macros3.xml"],
    "node4": ["config/config.xml", "config/macros4.xml"],
    "node_observer": ["config/config_observer.xml", "config/macros_o.xml"],
}

nodes = {
    node_name: cluster.add_instance(
        node_name,
        main_configs=shard_config
        + [
            "config/config_discovery_path_auxiliary_keeper.xml",
            "config/config_keepers.xml",
        ],
        stay_alive=True,
        with_zookeeper=True,
    )
    for node_name, shard_config in shard_configs.items()
}


@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()


def test_cluster_discovery_with_auxiliary_keeper_startup_and_stop(start_cluster):
    """
    Start the cluster, check the node count in system.clusters,
    then stop/start some nodes and check that they (dis)appear in the cluster.
    """

    check_nodes_count = functools.partial(
        check_on_cluster, what="count()", msg="Wrong nodes count in cluster"
    )
    check_shard_num = functools.partial(
        check_on_cluster,
        what="count(DISTINCT shard_num)",
        msg="Wrong shard_num count in cluster",
    )

    total_shards = 3
    total_nodes = 5

    check_nodes_count(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes
    )
    check_shard_num(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards
    )

    # test ON CLUSTER query
    nodes["node0"].query(
        "CREATE TABLE tbl ON CLUSTER 'test_auto_cluster' (x UInt64) ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/{shard}/tbl', '{replica}') ORDER BY x"
    )
    nodes["node0"].query("INSERT INTO tbl VALUES (1)")
    nodes["node1"].query("INSERT INTO tbl VALUES (2)")

    assert (
        int(
            nodes["node_observer"]
            .query(
                "SELECT sum(x) FROM clusterAllReplicas(test_auto_cluster, default.tbl)"
            )
            .strip()
        )
        == 3
    )

    # Query SYSTEM DROP DNS CACHE may reload cluster configuration;
    # check that it does not affect cluster discovery
    nodes["node1"].query("SYSTEM DROP DNS CACHE")
    nodes["node0"].query("SYSTEM DROP DNS CACHE")

    check_shard_num(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards
    )

    nodes["node1"].stop_clickhouse(kill=True)
    check_nodes_count(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 1
    )

    # node1 was the only node in shard '1'
    check_shard_num(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards - 1
    )

    nodes["node3"].stop_clickhouse()
    check_nodes_count(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 2
    )

    nodes["node1"].start_clickhouse()
    check_nodes_count(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 1
    )

    nodes["node3"].start_clickhouse()
    check_nodes_count(
        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes
    )

    # regular cluster is not affected
    check_nodes_count(
        [nodes["node1"], nodes["node2"]], 2, cluster_name="two_shards", retries=1
    )

    # cleanup
    nodes["node0"].query("DROP TABLE tbl ON CLUSTER 'test_auto_cluster' SYNC")
@ -112,11 +112,12 @@ def test_usage(cluster, node_name):
    for i in range(3):
        node2.query(
            """
            DROP TABLE IF EXISTS test{};
            CREATE TABLE test{} UUID '{}'
            (id Int32) ENGINE = MergeTree() ORDER BY id
            SETTINGS storage_policy = 'web';
            """.format(
                i, uuids[i]
                i, i, uuids[i]
            )
        )

@ -0,0 +1,18 @@
<clickhouse>
    <remote_servers>
        <test_ldap_cluster>
            <shard>
                <replica>
                    <host>instance1</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>instance2</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_ldap_cluster>
    </remote_servers>
</clickhouse>
@ -9,8 +9,22 @@ LDAP_ADMIN_BIND_DN = "cn=admin,dc=example,dc=org"
|
||||
LDAP_ADMIN_PASSWORD = "clickhouse"
|
||||
|
||||
cluster = ClickHouseCluster(__file__)
|
||||
instance = cluster.add_instance(
|
||||
"instance", main_configs=["configs/ldap_with_role_mapping.xml"], with_ldap=True
|
||||
|
||||
instance1 = cluster.add_instance(
|
||||
"instance1",
|
||||
main_configs=["configs/ldap_with_role_mapping.xml", "configs/remote_servers.xml"],
|
||||
macros={"shard": 1, "replica": "instance1"},
|
||||
stay_alive=True,
|
||||
with_ldap=True,
|
||||
with_zookeeper=True,
|
||||
)
|
||||
|
||||
instance2 = cluster.add_instance(
|
||||
"instance2",
|
||||
main_configs=["configs/remote_servers.xml"],
|
||||
macros={"shard": 1, "replica": "instance2"},
|
||||
stay_alive=True,
|
||||
with_zookeeper=True,
|
||||
)
|
||||
|
||||
|
||||
@ -74,59 +88,98 @@ def delete_ldap_group(ldap_cluster, group_cn):
|
||||
|
||||
|
||||
def test_authentication_pass():
|
||||
assert instance.query(
|
||||
assert instance1.query(
|
||||
"SELECT currentUser()", user="janedoe", password="qwerty"
|
||||
) == TSV([["janedoe"]])
|
||||
|
||||
|
||||
def test_authentication_fail():
|
||||
# User doesn't exist.
|
||||
assert "doesnotexist: Authentication failed" in instance.query_and_get_error(
|
||||
assert "doesnotexist: Authentication failed" in instance1.query_and_get_error(
|
||||
"SELECT currentUser()", user="doesnotexist"
|
||||
)
|
||||
|
||||
# Wrong password.
|
||||
assert "janedoe: Authentication failed" in instance.query_and_get_error(
|
||||
assert "janedoe: Authentication failed" in instance1.query_and_get_error(
|
||||
"SELECT currentUser()", user="janedoe", password="123"
|
||||
)
|
||||
|
||||
|
||||
def test_role_mapping(ldap_cluster):
|
||||
instance.query("DROP ROLE IF EXISTS role_1")
|
||||
instance.query("DROP ROLE IF EXISTS role_2")
|
||||
instance.query("DROP ROLE IF EXISTS role_3")
|
||||
instance.query("CREATE ROLE role_1")
|
||||
instance.query("CREATE ROLE role_2")
|
||||
instance1.query("DROP ROLE IF EXISTS role_1")
|
||||
instance1.query("DROP ROLE IF EXISTS role_2")
|
||||
instance1.query("DROP ROLE IF EXISTS role_3")
|
||||
instance1.query("CREATE ROLE role_1")
|
||||
instance1.query("CREATE ROLE role_2")
|
||||
add_ldap_group(ldap_cluster, group_cn="clickhouse-role_1", member_cn="johndoe")
|
||||
add_ldap_group(ldap_cluster, group_cn="clickhouse-role_2", member_cn="johndoe")
|
||||
|
||||
assert instance.query(
|
||||
assert instance1.query(
|
||||
"select currentUser()", user="johndoe", password="qwertz"
|
||||
) == TSV([["johndoe"]])
|
||||
|
||||
assert instance.query(
|
||||
assert instance1.query(
|
||||
"select role_name from system.current_roles ORDER BY role_name",
|
||||
user="johndoe",
|
||||
password="qwertz",
|
||||
) == TSV([["role_1"], ["role_2"]])
|
||||
|
||||
instance.query("CREATE ROLE role_3")
|
||||
instance1.query("CREATE ROLE role_3")
|
||||
add_ldap_group(ldap_cluster, group_cn="clickhouse-role_3", member_cn="johndoe")
|
||||
# Check that non-existing role in ClickHouse is ignored during role update
|
||||
# See https://github.com/ClickHouse/ClickHouse/issues/54318
|
||||
add_ldap_group(ldap_cluster, group_cn="clickhouse-role_4", member_cn="johndoe")
|
||||
|
||||
assert instance.query(
|
||||
assert instance1.query(
|
||||
"select role_name from system.current_roles ORDER BY role_name",
|
||||
user="johndoe",
|
||||
password="qwertz",
|
||||
) == TSV([["role_1"], ["role_2"], ["role_3"]])
|
||||
|
||||
instance.query("DROP ROLE role_1")
|
||||
instance.query("DROP ROLE role_2")
|
||||
instance.query("DROP ROLE role_3")
|
||||
instance1.query("DROP ROLE role_1")
|
||||
instance1.query("DROP ROLE role_2")
|
||||
instance1.query("DROP ROLE role_3")
|
||||
|
||||
delete_ldap_group(ldap_cluster, group_cn="clickhouse-role_1")
|
||||
delete_ldap_group(ldap_cluster, group_cn="clickhouse-role_2")
|
||||
delete_ldap_group(ldap_cluster, group_cn="clickhouse-role_3")
|
||||
delete_ldap_group(ldap_cluster, group_cn="clickhouse-role_4")
|
||||
|
||||
|
||||
def test_push_role_to_other_nodes(ldap_cluster):
|
||||
instance1.query("DROP TABLE IF EXISTS distributed_table SYNC")
|
||||
instance1.query("DROP TABLE IF EXISTS local_table SYNC")
|
||||
instance2.query("DROP TABLE IF EXISTS local_table SYNC")
|
||||
instance1.query("DROP ROLE IF EXISTS role_read")
|
||||
|
||||
instance1.query("CREATE ROLE role_read")
|
||||
instance1.query("GRANT SELECT ON *.* TO role_read")
|
||||
|
||||
add_ldap_group(ldap_cluster, group_cn="clickhouse-role_read", member_cn="johndoe")
|
||||
|
||||
assert instance1.query(
|
||||
"select currentUser()", user="johndoe", password="qwertz"
|
||||
) == TSV([["johndoe"]])
|
||||
|
||||
instance1.query(
|
||||
"CREATE TABLE IF NOT EXISTS local_table (id UInt32) ENGINE = MergeTree() ORDER BY id"
|
||||
)
|
||||
instance2.query(
|
||||
"CREATE TABLE IF NOT EXISTS local_table (id UInt32) ENGINE = MergeTree() ORDER BY id"
|
||||
)
|
||||
instance2.query("INSERT INTO local_table VALUES (1), (2), (3)")
|
||||
instance1.query(
|
||||
"CREATE TABLE IF NOT EXISTS distributed_table AS local_table ENGINE = Distributed(test_ldap_cluster, default, local_table)"
|
||||
)
|
||||
|
||||
result = instance1.query(
|
||||
"SELECT sum(id) FROM distributed_table", user="johndoe", password="qwertz"
|
||||
)
|
||||
assert result.strip() == "6"
|
||||
|
||||
instance1.query("DROP TABLE IF EXISTS distributed_table SYNC")
|
||||
instance1.query("DROP TABLE IF EXISTS local_table SYNC")
|
||||
instance2.query("DROP TABLE IF EXISTS local_table SYNC")
|
||||
instance2.query("DROP ROLE IF EXISTS role_read")
|
||||
|
||||
delete_ldap_group(ldap_cluster, group_cn="clickhouse-role_read")
|
||||
|
@ -7,21 +7,21 @@
            <endpoint>http://minio1:9001/root/data/</endpoint>
            <access_key_id>minio</access_key_id>
            <secret_access_key>minio123</secret_access_key>
            <send_metadata>true</send_metadata>
            <send_metadata>false</send_metadata>
        </s31>
        <s31_again>
            <type>s3</type>
            <endpoint>http://minio1:9001/root/data/</endpoint>
            <access_key_id>minio</access_key_id>
            <secret_access_key>minio123</secret_access_key>
            <send_metadata>true</send_metadata>
            <send_metadata>false</send_metadata>
        </s31_again>
        <s32>
            <type>s3</type>
            <endpoint>http://minio1:9001/root/data2/</endpoint>
            <access_key_id>minio</access_key_id>
            <secret_access_key>minio123</secret_access_key>
            <send_metadata>true</send_metadata>
            <send_metadata>false</send_metadata>
        </s32>
    </disks>
    <policies>
@ -1,10 +1,12 @@
import datetime
import logging
import threading
import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager

logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
@ -77,15 +79,19 @@ def wait_for_large_objects_count(cluster, expected, size=100, timeout=30):
    assert get_large_objects_count(cluster, size=size) == expected


def wait_for_active_parts(node, num_expected_parts, table_name, timeout=30):
def wait_for_active_parts(
    node, num_expected_parts, table_name, timeout=30, disk_name=None
):
    deadline = time.monotonic() + timeout
    num_parts = 0
    while time.monotonic() < deadline:
        num_parts_str = node.query(
            "select count() from system.parts where table = '{}' and active".format(
                table_name
            )
        query = (
            f"select count() from system.parts where table = '{table_name}' and active"
        )
        if disk_name:
            query += f" and disk_name='{disk_name}'"

        num_parts_str = node.query(query)
        num_parts = int(num_parts_str.strip())
        if num_parts == num_expected_parts:
            return
@ -95,6 +101,22 @@ def wait_for_active_parts(node, num_expected_parts, table_name, timeout=30):
    assert num_parts == num_expected_parts


@pytest.fixture(scope="function")
def test_name(request):
    return request.node.name


@pytest.fixture(scope="function")
def test_table(test_name):
    normalized = (
        test_name.replace("[", "_")
        .replace("]", "_")
        .replace(" ", "_")
        .replace("-", "_")
    )
    return "table_" + normalized


# Result of `get_large_objects_count` can be changed in other tests, so run this case at the beginning
@pytest.mark.order(0)
@pytest.mark.parametrize("policy", ["s3"])
@ -668,3 +690,111 @@ def test_s3_zero_copy_keeps_data_after_mutation(started_cluster):
    time.sleep(10)

    check_objects_not_exisis(cluster, objectsY)


@pytest.mark.parametrize(
    "failpoint", ["zero_copy_lock_zk_fail_before_op", "zero_copy_lock_zk_fail_after_op"]
)
def test_move_shared_lock_fail_once(started_cluster, test_table, failpoint):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

    node1.query(
        f"""
        CREATE TABLE {test_table} ON CLUSTER test_cluster (num UInt64, date DateTime)
        ENGINE=ReplicatedMergeTree('/clickhouse/tables/{test_table}', '{{replica}}')
        ORDER BY date PARTITION BY date
        SETTINGS storage_policy='hybrid'
        """
    )

    date = "2024-10-23"

    node2.query(f"SYSTEM STOP FETCHES {test_table}")
    node1.query(f"INSERT INTO {test_table} VALUES (1, '{date}')")

    # Try to move and fail on acquiring the zero-copy shared lock
    node1.query(f"SYSTEM ENABLE FAILPOINT {failpoint}")
    node1.query_and_get_error(
        f"ALTER TABLE {test_table} MOVE PARTITION '{date}' TO VOLUME 'external'"
    )

    # After the failure the part must remain on the source disk
    assert (
        node1.query(
            f"SELECT disk_name FROM system.parts WHERE table='{test_table}' GROUP BY disk_name"
        )
        == "default\n"
    )

    # Try another attempt after the zk connection is restored.
    # It should not fail due to leftovers of the previous attempt (temporary cloned files)
    node1.query(f"SYSTEM DISABLE FAILPOINT {failpoint}")
    node1.query(
        f"ALTER TABLE {test_table} MOVE PARTITION '{date}' TO VOLUME 'external'"
    )

    assert (
        node1.query(
            f"SELECT disk_name FROM system.parts WHERE table='{test_table}' GROUP BY disk_name"
        )
        == "s31\n"
    )

    # Sanity check
    node2.query(f"SYSTEM START FETCHES {test_table}")
    wait_for_active_parts(node2, 1, test_table, disk_name="s31")
    assert node2.query(f"SELECT sum(num) FROM {test_table}") == "1\n"

    node1.query(f"DROP TABLE IF EXISTS {test_table} SYNC")
    node2.query(f"DROP TABLE IF EXISTS {test_table} SYNC")


def test_move_shared_lock_fail_keeper_unavailable(started_cluster, test_table):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

    node1.query(
        f"""
        CREATE TABLE {test_table} ON CLUSTER test_cluster (num UInt64, date DateTime)
        ENGINE=ReplicatedMergeTree('/clickhouse/tables/{test_table}', '{{replica}}')
        ORDER BY date PARTITION BY date
        SETTINGS storage_policy='hybrid'
        """
    )

    date = "2024-10-23"
    node2.query(f"SYSTEM STOP FETCHES {test_table}")

    node1.query(f"INSERT INTO {test_table} VALUES (1, '{date}')")
    # Pause moving after part cloning, but before swapping
    node1.query("SYSTEM ENABLE FAILPOINT stop_moving_part_before_swap_with_active")

    def move(node):
        node.query_and_get_error(
            f"ALTER TABLE {test_table} MOVE PARTITION '{date}' TO VOLUME 'external'"
        )

    # Start moving
    t1 = threading.Thread(target=move, args=[node1])
    t1.start()

    with PartitionManager() as pm:
        pm.drop_instance_zk_connections(node1)
        # Continue moving and try to swap
        node1.query("SYSTEM DISABLE FAILPOINT stop_moving_part_before_swap_with_active")
        t1.join()

    # The previous MOVE failed; try another one after the zk connection is restored.
    # It should not fail due to leftovers of the previous attempt (temporary cloned files)
    node1.query_with_retry(
        f"ALTER TABLE {test_table} MOVE PARTITION '{date}' TO VOLUME 'external'"
    )

    # Sanity check
    node2.query(f"SYSTEM START FETCHES {test_table}")
    wait_for_active_parts(node2, 1, test_table, disk_name="s31")
    assert node2.query(f"SELECT sum(num) FROM {test_table}") == "1\n"

    node1.query(f"DROP TABLE IF EXISTS {test_table} SYNC")
    node2.query(f"DROP TABLE IF EXISTS {test_table} SYNC")
|
||||
"""
|
||||
result = node.query(INPUT)
|
||||
assert result == EXPECTED_OUTPUT
|
||||
|
||||
|
||||
def test_move_partition_to_table_command():
|
||||
INPUT = "SELECT formatQuery('ALTER TABLE a MOVE PARTITION tuple() TO TABLE b')"
|
||||
EXPECTED_OUTPUT = "ALTER TABLE a\\n (MOVE PARTITION tuple() TO TABLE b)\n"
|
||||
result = node.query(INPUT)
|
||||
assert result == EXPECTED_OUTPUT
|
||||
|
6
tests/queries/0_stateless/03234_check_grant.reference
Normal file
@ -0,0 +1,6 @@
1
1
0
1
1
1
35
tests/queries/0_stateless/03234_check_grant.sh
Executable file
@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Tags: no-parallel
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
db=${CLICKHOUSE_DATABASE}

${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS user_03234; DROP TABLE IF EXISTS ${db}.tb; CREATE USER user_03234; GRANT SELECT ON ${db}.tb TO user_03234;"

# Has been granted, but the table does not exist
# expected output: 1
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tb"

${CLICKHOUSE_CLIENT} --query "CREATE TABLE ${db}.tb (\`content\` UInt64) ENGINE = MergeTree ORDER BY content; INSERT INTO ${db}.tb VALUES (1);"
# Has been granted and the table exists
# expected output: 1
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tb"

${CLICKHOUSE_CLIENT} --query "REVOKE SELECT ON ${db}.tb FROM user_03234;"
# Has not been granted, but the table exists
# expected output: 0
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tb"

# Role
# expected output: 1
${CLICKHOUSE_CLIENT} --query "DROP ROLE IF EXISTS role_03234; CREATE ROLE role_03234; GRANT SELECT ON ${db}.tb TO role_03234; GRANT role_03234 TO user_03234"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "SET ROLE role_03234"
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tb"

# Wildcard grants
${CLICKHOUSE_CLIENT} --query "GRANT SELECT ON ${db}.tbb* TO user_03234;"
# expected output: 1
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tbb1"
# expected output: 1
${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&user=user_03234" --data-binary "CHECK GRANT SELECT ON ${db}.tbb2*"
@ -0,0 +1,11 @@
SelectWithUnionQuery (children 1)
 ExpressionList (children 1)
  SelectQuery (children 2)
   ExpressionList (children 1)
    Asterisk
   TablesInSelectQuery (children 1)
    TablesInSelectQueryElement (children 1)
     TableExpression (children 1)
      Function numbers (children 1)
       ExpressionList (children 1)
        Literal UInt64_10
@ -0,0 +1,5 @@
SELECT * FROM ( EXPLAIN AST SELECT * FROM numbers(10) );
SELECT * FROM ( EXPLAIN AST CREATE TABLE test ENGINE=Memory ); -- {clientError BAD_ARGUMENTS}
SELECT * FROM ( EXPLAIN AST CREATE MATERIALIZED VIEW mv (data String) AS SELECT data FROM table ); -- {clientError BAD_ARGUMENTS}
SELECT * FROM ( EXPLAIN AST INSERT INTO TABLE test VALUES); -- {clientError BAD_ARGUMENTS}
SELECT * FROM ( EXPLAIN AST ALTER TABLE test MODIFY COLUMN x UInt32 ); -- {clientError BAD_ARGUMENTS}
@ -13,8 +13,8 @@ fi
ROOT_PATH="$(git rev-parse --show-toplevel)"
LIBS_PATH="${ROOT_PATH}/contrib"

mapfile -t libs < <(echo "${ROOT_PATH}/base/poco"; find "${LIBS_PATH}" -maxdepth 1 -type d -not -name '*-cmake' -not -name 'rust_vendor' | LC_ALL=C sort)
for LIB in "${libs[@]}"
libs=$(echo "${ROOT_PATH}/base/poco"; (find "${LIBS_PATH}" -maxdepth 1 -type d -not -name '*-cmake' -not -name 'rust_vendor' | LC_ALL=C sort) )
for LIB in ${libs}
do
    LIB_NAME=$(basename "$LIB")

@ -97,4 +97,4 @@ do
done

# Special care for Rust
find "${LIBS_PATH}/rust_vendor/" -name 'Cargo.toml' | xargs grep 'license = ' | (grep -v -P 'MIT|Apache|MPL' && echo "Fatal error: unrecognized licenses in the Rust code" >&2 && exit 1 || true)
find "${LIBS_PATH}/rust_vendor/" -name 'Cargo.toml' | xargs ${GREP_CMD} 'license = ' | (${GREP_CMD} -v -P 'MIT|Apache|MPL' && echo "Fatal error: unrecognized licenses in the Rust code" >&2 && exit 1 || true)