Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 10:02:01 +00:00)

Merge branch 'master' of github.com:ClickHouse/ClickHouse into vxider-window-view

This commit is contained in: commit 29978f9707

.github/workflows/main.yml (vendored): 147
@ -186,11 +186,11 @@ jobs:
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
##########################################################################################
|
||||
##################################### ORDINARY BUILDS ####################################
|
||||
##########################################################################################
|
||||
#########################################################################################
|
||||
#################################### ORDINARY BUILDS ####################################
|
||||
#########################################################################################
|
||||
BuilderDebRelease:
|
||||
needs: DockerHubPush
|
||||
needs: [DockerHubPush, FastTest]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
@@ -229,7 +229,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderBinRelease:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@@ -268,7 +268,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderDebAsan:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@@ -307,7 +307,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderDebUBsan:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@@ -346,7 +346,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderDebTsan:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@@ -385,7 +385,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderDebMsan:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@@ -424,7 +424,7 @@ jobs:
docker rm -f $(docker ps -a -q) ||:
sudo rm -fr $TEMP_PATH
BuilderDebDebug:
-needs: DockerHubPush
+needs: [DockerHubPush, FastTest]
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
runs-on: [self-hosted, builder]
steps:
@ -466,7 +466,7 @@ jobs:
|
||||
##################################### SPECIAL BUILDS #####################################
|
||||
##########################################################################################
|
||||
BuilderDebSplitted:
|
||||
needs: DockerHubPush
|
||||
needs: [DockerHubPush, FastTest]
|
||||
if: ${{ !contains(github.event.pull_request.labels.*.name, 'pr-documentation') && !contains(github.event.pull_request.labels.*.name, 'pr-doc-fix') }}
|
||||
runs-on: [self-hosted, builder]
|
||||
steps:
|
||||
@ -1210,38 +1210,96 @@ jobs:
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
# IntegrationTestsAsan:
|
||||
# needs: [BuilderDebAsan]
|
||||
# runs-on: [self-hosted, stress-tester]
|
||||
# steps:
|
||||
# - name: Download json reports
|
||||
# uses: actions/download-artifact@v2
|
||||
# with:
|
||||
# path: ${{runner.temp}}/reports_dir
|
||||
# - name: Check out repository code
|
||||
# uses: actions/checkout@v2
|
||||
# - name: Integration test
|
||||
# env:
|
||||
# TEMP_PATH: ${{runner.temp}}/integration_tests_asan
|
||||
# REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
# CHECK_NAME: 'Integration tests (asan, actions)'
|
||||
# REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
# run: |
|
||||
# sudo rm -fr $TEMP_PATH
|
||||
# mkdir -p $TEMP_PATH
|
||||
# cp -r $GITHUB_WORKSPACE $TEMP_PATH
|
||||
# cd $REPO_COPY/tests/ci
|
||||
# python3 integration_test_check.py "$CHECK_NAME"
|
||||
# - name: Cleanup
|
||||
# if: always()
|
||||
# run: |
|
||||
# docker kill $(docker ps -q) ||:
|
||||
# docker rm -f $(docker ps -a -q) ||:
|
||||
# sudo rm -fr $TEMP_PATH
|
||||
|
||||
##############################################################################################
|
||||
##################################### UNIT TESTS #############################################
|
||||
##############################################################################################
|
||||
#############################################################################################
|
||||
############################# INTEGRATION TESTS #############################################
|
||||
#############################################################################################
|
||||
IntegrationTestsAsan:
|
||||
needs: [BuilderDebAsan, FunctionalStatelessTestAsan]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Integration test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/integration_tests_asan
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Integration tests (asan, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/integration_tests_asan/ClickHouse
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
cp -r $GITHUB_WORKSPACE $TEMP_PATH
|
||||
cd $REPO_COPY/tests/ci
|
||||
python3 integration_test_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
IntegrationTestsTsan:
|
||||
needs: [BuilderDebTsan, FunctionalStatelessTestTsan]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Integration test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/integration_tests_tsan
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Integration tests (thread, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/integration_tests_tsan/ClickHouse
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
cp -r $GITHUB_WORKSPACE $TEMP_PATH
|
||||
cd $REPO_COPY/tests/ci
|
||||
python3 integration_test_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
IntegrationTestsRelease:
|
||||
needs: [BuilderDebRelease, FunctionalStatelessTestRelease]
|
||||
runs-on: [self-hosted, stress-tester]
|
||||
steps:
|
||||
- name: Download json reports
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
path: ${{runner.temp}}/reports_dir
|
||||
- name: Check out repository code
|
||||
uses: actions/checkout@v2
|
||||
- name: Integration test
|
||||
env:
|
||||
TEMP_PATH: ${{runner.temp}}/integration_tests_release
|
||||
REPORTS_PATH: ${{runner.temp}}/reports_dir
|
||||
CHECK_NAME: 'Integration tests (release, actions)'
|
||||
REPO_COPY: ${{runner.temp}}/integration_tests_release/ClickHouse
|
||||
run: |
|
||||
sudo rm -fr $TEMP_PATH
|
||||
mkdir -p $TEMP_PATH
|
||||
cp -r $GITHUB_WORKSPACE $TEMP_PATH
|
||||
cd $REPO_COPY/tests/ci
|
||||
python3 integration_test_check.py "$CHECK_NAME"
|
||||
- name: Cleanup
|
||||
if: always()
|
||||
run: |
|
||||
docker kill $(docker ps -q) ||:
|
||||
docker rm -f $(docker ps -a -q) ||:
|
||||
sudo rm -fr $TEMP_PATH
|
||||
#############################################################################################
|
||||
#################################### UNIT TESTS #############################################
|
||||
#############################################################################################
|
||||
UnitTestsAsan:
|
||||
needs: [BuilderDebAsan]
|
||||
runs-on: [self-hosted, func-tester]
|
||||
@ -1412,6 +1470,9 @@ jobs:
|
||||
- ASTFuzzerTestTsan
|
||||
- ASTFuzzerTestMSan
|
||||
- ASTFuzzerTestUBSan
|
||||
- IntegrationTestsAsan
|
||||
- IntegrationTestsRelease
|
||||
- IntegrationTestsTsan
|
||||
- PVSCheck
|
||||
- UnitTestsAsan
|
||||
- UnitTestsTsan
|
||||
|
@@ -17,7 +17,7 @@

* Support `EXISTS (subquery)`. Closes [#6852](https://github.com/ClickHouse/ClickHouse/issues/6852). [#29731](https://github.com/ClickHouse/ClickHouse/pull/29731) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Session logging for audit. Logging all successful and failed login and logout events to a new `system.session_log` table. [#22415](https://github.com/ClickHouse/ClickHouse/pull/22415) ([Vasily Nemkov](https://github.com/Enmk)) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support multidimensional cosine distance and Euclidean distance functions; L1, L2, Lp, Linf distances and norms. Scalar product on tuples and various arithmetic operators on tuples. This fully closes [#4509](https://github.com/ClickHouse/ClickHouse/issues/4509) and even more. [#27933](https://github.com/ClickHouse/ClickHouse/pull/27933) ([Alexey Boykov](https://github.com/mathalex)).
-* Add support for compression and decompression for `INTO OUTPUT` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
+* Add support for compression and decompression for `INTO OUTFILE` and `FROM INFILE` (with autodetect or with additional optional parameter). [#27135](https://github.com/ClickHouse/ClickHouse/pull/27135) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Add CORS (Cross Origin Resource Sharing) support with HTTP `OPTIONS` request. This means Grafana will now work with serverless requests without kludges. Closes [#18693](https://github.com/ClickHouse/ClickHouse/issues/18693). [#29155](https://github.com/ClickHouse/ClickHouse/pull/29155) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Queries with JOIN ON now support disjunctions (OR). [#21320](https://github.com/ClickHouse/ClickHouse/pull/21320) ([Ilya Golshtein](https://github.com/ilejn)).
* Added function `tokens`, which splits a string into tokens using non-alphanumeric ASCII characters as separators. [#29981](https://github.com/ClickHouse/ClickHouse/pull/29981) ([Maksim Kita](https://github.com/kitaisreal)). Added function `ngrams` to extract ngrams from text. Closes [#29699](https://github.com/ClickHouse/ClickHouse/issues/29699). [#29738](https://github.com/ClickHouse/ClickHouse/pull/29738) ([Maksim Kita](https://github.com/kitaisreal)).
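A quick illustrative sketch of the two string functions described in the last entry above (names and signatures as stated there; the example values are invented):

``` sql
-- `tokens`: split on non-alphanumeric ASCII characters.
SELECT tokens('ClickHouse window-view merge!') AS toks;  -- ['ClickHouse', 'window', 'view', 'merge']

-- `ngrams`: extract character n-grams of the given size.
SELECT ngrams('merge', 3) AS grams;                      -- ['mer', 'erg', 'rge']
```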
@ -25,6 +25,16 @@ void trim(String & s)
|
||||
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
|
||||
}
|
||||
|
||||
std::string getEditor()
|
||||
{
|
||||
const char * editor = std::getenv("EDITOR");
|
||||
|
||||
if (!editor || !*editor)
|
||||
editor = "vim";
|
||||
|
||||
return editor;
|
||||
}
|
||||
|
||||
/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
|
||||
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
|
||||
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
|
||||
@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader(
|
||||
Patterns delimiters_,
|
||||
replxx::Replxx::highlighter_callback_t highlighter_)
|
||||
: LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_))
|
||||
, editor(getEditor())
|
||||
{
|
||||
using namespace std::placeholders;
|
||||
using Replxx = replxx::Replxx;
|
||||
@ -236,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line)
|
||||
rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str());
|
||||
}
|
||||
|
||||
int ReplxxLineReader::execute(const std::string & command)
|
||||
/// See comments in ShellCommand::executeImpl()
|
||||
/// (for the vfork via dlsym())
|
||||
int ReplxxLineReader::executeEditor(const std::string & path)
|
||||
{
|
||||
std::vector<char> argv0("sh", &("sh"[3]));
|
||||
std::vector<char> argv1("-c", &("-c"[3]));
|
||||
std::vector<char> argv2(command.data(), command.data() + command.size() + 1);
|
||||
|
||||
const char * filename = "/bin/sh";
|
||||
char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr};
|
||||
std::vector<char> argv0(editor.data(), editor.data() + editor.size() + 1);
|
||||
std::vector<char> argv1(path.data(), path.data() + path.size() + 1);
|
||||
char * const argv[] = {argv0.data(), argv1.data(), nullptr};
|
||||
|
||||
static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork");
|
||||
if (!real_vfork)
|
||||
@ -260,6 +270,7 @@ int ReplxxLineReader::execute(const std::string & command)
|
||||
return -1;
|
||||
}
|
||||
|
||||
/// Child
|
||||
if (0 == pid)
|
||||
{
|
||||
sigset_t mask;
|
||||
@ -267,16 +278,26 @@ int ReplxxLineReader::execute(const std::string & command)
|
||||
sigprocmask(0, nullptr, &mask);
|
||||
sigprocmask(SIG_UNBLOCK, &mask, nullptr);
|
||||
|
||||
execv(filename, argv);
|
||||
execvp(editor.c_str(), argv);
|
||||
rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str());
|
||||
_exit(-1);
|
||||
}
|
||||
|
||||
int status = 0;
|
||||
if (-1 == waitpid(pid, &status, 0))
|
||||
do
|
||||
{
|
||||
rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
|
||||
return -1;
|
||||
}
|
||||
int exited_pid = waitpid(pid, &status, 0);
|
||||
if (exited_pid == -1)
|
||||
{
|
||||
if (errno == EINTR)
|
||||
continue;
|
||||
|
||||
rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str());
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
break;
|
||||
} while (true);
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -290,10 +311,6 @@ void ReplxxLineReader::openEditor()
|
||||
return;
|
||||
}
|
||||
|
||||
const char * editor = std::getenv("EDITOR");
|
||||
if (!editor || !*editor)
|
||||
editor = "vim";
|
||||
|
||||
replxx::Replxx::State state(rx.get_state());
|
||||
|
||||
size_t bytes_written = 0;
|
||||
@ -316,7 +333,7 @@ void ReplxxLineReader::openEditor()
|
||||
return;
|
||||
}
|
||||
|
||||
if (0 == execute(fmt::format("{} {}", editor, filename)))
|
||||
if (0 == executeEditor(filename))
|
||||
{
|
||||
try
|
||||
{
|
||||
|
@ -22,7 +22,7 @@ public:
|
||||
private:
|
||||
InputStatus readOneLine(const String & prompt) override;
|
||||
void addToHistory(const String & line) override;
|
||||
int execute(const std::string & command);
|
||||
int executeEditor(const std::string & path);
|
||||
void openEditor();
|
||||
|
||||
replxx::Replxx rx;
|
||||
@ -31,4 +31,6 @@ private:
|
||||
// used to call flock() to synchronize multiple clients using same history file
|
||||
int history_file_fd = -1;
|
||||
bool bracketed_paste_enabled = false;
|
||||
|
||||
std::string editor;
|
||||
};
|
||||
|
@ -63,6 +63,9 @@
|
||||
#include <Common/Elf.h>
|
||||
#include <filesystem>
|
||||
|
||||
#include <loggers/OwnFormattingChannel.h>
|
||||
#include <loggers/OwnPatternFormatter.h>
|
||||
|
||||
#include <Common/config_version.h>
|
||||
|
||||
#if defined(OS_DARWIN)
|
||||
@ -1001,6 +1004,14 @@ void BaseDaemon::setupWatchdog()
|
||||
memcpy(argv0, new_process_name, std::min(strlen(new_process_name), original_process_name.size()));
|
||||
}
|
||||
|
||||
/// If streaming compression of logs is used then we write watchdog logs to cerr
|
||||
if (config().getRawString("logger.stream_compress", "false") == "true")
|
||||
{
|
||||
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
|
||||
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
|
||||
logger().setChannel(log);
|
||||
}
|
||||
|
||||
logger().information(fmt::format("Will watch for the process with pid {}", pid));
|
||||
|
||||
/// Forward signals to the child process.
|
||||
|
@ -62,7 +62,13 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
|
||||
if (!log_path.empty())
|
||||
{
|
||||
createDirectory(log_path);
|
||||
std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl;
|
||||
|
||||
std::string ext;
|
||||
if (config.getRawString("logger.stream_compress", "false") == "true")
|
||||
ext = ".lz4";
|
||||
|
||||
std::cerr << "Logging " << log_level_string << " to " << log_path << ext << std::endl;
|
||||
|
||||
auto log_level = Poco::Logger::parseLevel(log_level_string);
|
||||
if (log_level > max_log_level)
|
||||
{
|
||||
@ -75,6 +81,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
|
||||
log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
|
||||
log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
|
||||
log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
|
||||
log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
|
||||
log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
|
||||
log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
|
||||
log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
|
||||
@ -100,13 +107,18 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
|
||||
max_log_level = errorlog_level;
|
||||
}
|
||||
|
||||
std::cerr << "Logging errors to " << errorlog_path << std::endl;
|
||||
std::string ext;
|
||||
if (config.getRawString("logger.stream_compress", "false") == "true")
|
||||
ext = ".lz4";
|
||||
|
||||
std::cerr << "Logging errors to " << errorlog_path << ext << std::endl;
|
||||
|
||||
error_log_file = new Poco::FileChannel;
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_STREAMCOMPRESS, config.getRawString("logger.stream_compress", "false"));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_PURGECOUNT, config.getRawString("logger.count", "1"));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
|
||||
error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
|
||||
|
2
contrib/poco
vendored
2
contrib/poco
vendored
@ -1 +1 @@
|
||||
Subproject commit 173fb31717837d366152c508619b09dcf11786da
|
||||
Subproject commit 258b9ba6cd245ff88e9346f75c43464c403f329d
|
@ -51,6 +51,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
|
||||
"${LIBRARY_DIR}/Foundation/src/Channel.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/Checksum.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/Clock.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/CompressedLogFile.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/Condition.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/Configurable.cpp"
|
||||
"${LIBRARY_DIR}/Foundation/src/ConsoleChannel.cpp"
|
||||
@ -222,7 +223,7 @@ if (USE_INTERNAL_POCO_LIBRARY)
|
||||
POCO_OS_FAMILY_UNIX
|
||||
)
|
||||
target_include_directories (_poco_foundation SYSTEM PUBLIC "${LIBRARY_DIR}/Foundation/include")
|
||||
target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES})
|
||||
target_link_libraries (_poco_foundation PRIVATE Poco::Foundation::PCRE ${ZLIB_LIBRARIES} lz4)
|
||||
else ()
|
||||
add_library (Poco::Foundation UNKNOWN IMPORTED GLOBAL)
|
||||
|
||||
|
@ -19,6 +19,7 @@ RUN apt-get update \
|
||||
sqlite3 \
|
||||
curl \
|
||||
tar \
|
||||
lz4 \
|
||||
krb5-user \
|
||||
iproute2 \
|
||||
lsof \
|
||||
|
@ -37,6 +37,12 @@ function configure()
|
||||
# install test configs
|
||||
/usr/share/clickhouse-test/config/install.sh
|
||||
|
||||
# avoid too slow startup
|
||||
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|<snapshot_distance>100000</snapshot_distance>|<snapshot_distance>10000</snapshot_distance>|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
|
||||
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml
|
||||
|
||||
# for clickhouse-server (via service)
|
||||
echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
|
||||
# for clickhouse-client
|
||||
|
@ -241,7 +241,7 @@ Adding third-party libraries: https://clickhouse.com/docs/en/development/contrib
|
||||
|
||||
Writing tests: https://clickhouse.com/docs/en/development/tests/
|
||||
|
||||
List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3A%22easy+task%22
|
||||
List of tasks: https://github.com/ClickHouse/ClickHouse/issues?q=is%3Aopen+is%3Aissue+label%3Ahacktoberfest
|
||||
|
||||
## Test Data {#test-data}
|
||||
|
||||
|
@ -11,7 +11,8 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
|
||||
|
||||
``` sql
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
|
||||
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
|
||||
[SETTINGS ...]
|
||||
```
|
||||
|
||||
**Engine parameters**
|
||||
@ -23,21 +24,13 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi
|
||||
|
||||
**Example**
|
||||
|
||||
1. Set up the `s3_engine_table` table:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32) ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
|
||||
```
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
|
||||
SETTINGS input_format_with_names_use_header = 0;
|
||||
|
||||
2. Fill the file:

``` sql
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
```

3. Query the data:

``` sql
SELECT * FROM s3_engine_table LIMIT 2;
```
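In addition to the regular columns, the `_path` and `_file` virtual columns described further down this page can be selected as well. A small illustrative query against the table created above (a sketch; the values returned depend on the bucket contents):

``` sql
SELECT name, value, _path, _file
FROM s3_engine_table
LIMIT 2;
```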
@ -73,57 +66,54 @@ For more information about virtual columns see [here](../../../engines/table-eng
|
||||
|
||||
Constructions with `{}` are similar to the [remote](../../../sql-reference/table-functions/remote.md) table function.
|
||||
|
||||
**Example**
|
||||
!!! warning "Warning"
|
||||
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
|
||||
|
||||
1. Suppose we have several files in CSV format with the following URIs on S3:
|
||||
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
|
||||
|
||||
There are several ways to make a table consisting of all six files:
|
||||
|
||||
The first way:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_range (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
|
||||
```
|
||||
|
||||
Another way:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_question_mark (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
|
||||
```
|
||||
|
||||
Table consists of all the files in both directories (all files should satisfy format and schema described in query):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_asterisk (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
|
||||
```
|
||||
|
||||
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
|
||||
|
||||
**Example**
|
||||
**Example with wildcards 1**
|
||||
|
||||
Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
|
||||
CREATE TABLE big_table (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
|
||||
```
|
||||
|
||||
## Virtual Columns {#virtual-columns}
|
||||
**Example with wildcards 2**
|
||||
|
||||
- `_path` — Path to the file.
|
||||
- `_file` — Name of the file.
|
||||
Suppose we have several files in CSV format with the following URIs on S3:
|
||||
|
||||
**See Also**
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'
|
||||
|
||||
- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
|
||||
|
||||
## S3-related settings {#settings}
|
||||
There are several ways to make a table consisting of all six files:
|
||||
|
||||
1. Specify the range of file postfixes:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_range (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
|
||||
```
|
||||
|
||||
2. Take all files with `some_file_` prefix (there should be no extra files with such prefix in both folders):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_question_mark (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
|
||||
```
|
||||
|
||||
3. Take all the files in both folders (all files should satisfy format and schema described in query):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_asterisk (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
|
||||
```
|
||||
|
||||
## S3-related Settings {#settings}
|
||||
|
||||
The following settings can be set before query execution or placed into configuration file.
|
||||
|
||||
@ -165,49 +155,6 @@ The following settings can be specified in configuration file for given endpoint
|
||||
</s3>
|
||||
```
|
||||
|
||||
## Usage {#usage-examples}
|
||||
|
||||
Suppose we have several files in CSV format with the following URIs on S3:
|
||||
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
|
||||
|
||||
|
||||
1. There are several ways to make a table consisting of all six files:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_range (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
|
||||
```
|
||||
|
||||
2. Another way:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_question_mark (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
|
||||
```
|
||||
|
||||
3. Table consists of all the files in both directories (all files should satisfy format and schema described in query):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_asterisk (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
|
||||
```
|
||||
|
||||
!!! warning "Warning"
|
||||
If the listing of files contains number ranges with leading zeros, use the construction with braces for each digit separately or use `?`.
|
||||
|
||||
4. Create table with files named `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE big_table (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
|
||||
```
|
||||
|
||||
## See also
|
||||
|
||||
- [s3 table function](../../../sql-reference/table-functions/s3.md)
|
||||
|
@ -381,8 +381,11 @@ We ran queries using a client located in a Yandex datacenter in Finland on a clu
|
||||
|
||||
| servers | Q1 | Q2 | Q3 | Q4 |
|
||||
|---------|-------|-------|-------|-------|
|
||||
| 1 | 0.490 | 1.224 | 2.104 | 3.593 |
|
||||
| 3 | 0.212 | 0.438 | 0.733 | 1.241 |
|
||||
| 140 | 0.028 | 0.043 | 0.051 | 0.072 |
|
||||
| 1, E5-2650v2 | 0.490 | 1.224 | 2.104 | 3.593 |
|
||||
| 3, E5-2650v2 | 0.212 | 0.438 | 0.733 | 1.241 |
|
||||
| 1, AWS c5n.4xlarge | 0.249 | 1.279 | 1.738 | 3.527 |
|
||||
| 1, AWS c5n.9xlarge | 0.130 | 0.584 | 0.777 | 1.811 |
|
||||
| 3, AWS c5n.9xlarge | 0.057 | 0.231 | 0.285 | 0.641 |
|
||||
| 140, E5-2650v2 | 0.028 | 0.043 | 0.051 | 0.072 |
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/getting_started/example_datasets/nyc_taxi/) <!--hide-->
|
||||
|
@@ -177,6 +177,9 @@ This format is also available under the name `TSVRaw`.

## TabSeparatedWithNames {#tabseparatedwithnames}

Differs from the `TabSeparated` format in that the column names are written in the first row.

During parsing, the first row is expected to contain the column names. You can use the column names to determine their position and to check their correctness.

If the setting [input_format_with_names_use_header](../operations/settings/settings.md#settings-input_format_with_names_use_header) is set to 1,
the columns from the input data are mapped to the table columns by their names; columns with unknown names are skipped if the setting [input_format_skip_unknown_fields](../operations/settings/settings.md#settings-input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row is skipped.
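For instance, the header row that this format writes (and expects on input) looks like this; a minimal sketch with invented column names:

``` sql
SELECT 1 AS id, 'alpha' AS name
FORMAT TabSeparatedWithNames

-- Output (tab-separated), with the column names in the first row:
-- id    name
-- 1     alpha
```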
@@ -36,6 +36,7 @@ toc_title: Client Libraries

- [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
- [node-clickhouse](https://github.com/apla/node-clickhouse)
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
- Perl
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
@ -79,7 +79,7 @@ toc_title: Adopters
|
||||
| <a href="https://www.ivi.ru/" class="favicon">Ivi</a> | Online Cinema | Analytics, Monitoring | — | — | [Article in Russian, Jan 2018](https://habr.com/en/company/ivi/blog/347408/) |
|
||||
| <a href="https://jinshuju.net" class="favicon">Jinshuju 金数据</a> | BI Analytics | Main product | — | — | [Slides in Chinese, October 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/3.%20金数据数据架构调整方案Public.pdf) |
|
||||
| <a href="https://jitsu.com" class="favicon">Jitsu</a> | Cloud Software | Data Pipeline | — | — | [Documentation](https://jitsu.com/docs/destinations-configuration/clickhouse-destination), [Hacker News](https://news.ycombinator.com/item?id=29106082) |
|
||||
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020 conference](https://if.kakao.com/session/117) |
|
||||
| <a href="https://www.kakaocorp.com/" class="favicon">kakaocorp</a> | Internet company | — | — | — | [if(kakao)2020](https://tv.kakao.com/channel/3693125/cliplink/414129353), [if(kakao)2021](https://if.kakao.com/session/24) |
|
||||
| <a href="https://www.kodiakdata.com/" class="favicon">Kodiak Data</a> | Clouds | Main product | — | — | [Slides in Engish, April 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup13/kodiak_data.pdf) |
|
||||
| <a href="https://kontur.ru" class="favicon">Kontur</a> | Software Development | Metrics | — | — | [Talk in Russian, November 2018](https://www.youtube.com/watch?v=U4u4Bd0FtrY) |
|
||||
| <a href="https://www.kuaishou.com/" class="favicon">Kuaishou</a> | Video | — | — | — | [ClickHouse Meetup, October 2018](https://clickhouse.com/blog/en/2018/clickhouse-community-meetup-in-beijing-on-october-28-2018/) |
|
||||
@ -108,6 +108,7 @@ toc_title: Adopters
|
||||
| <a href="https://panelbear.com/" class="favicon">Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) |
|
||||
| <a href="https://www.percent.cn/" class="favicon">Percent 百分点</a> | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) |
|
||||
| <a href="https://www.percona.com/" class="favicon">Percona</a> | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) |
|
||||
| <a href="https://piwik.pro/" class="favicon">Piwik PRO</a> | Web Analytics | Main Product | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) |
|
||||
| <a href="https://plausible.io/" class="favicon">Plausible</a> | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) |
|
||||
| <a href="https://posthog.com/" class="favicon">PostHog</a> | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) |
|
||||
| <a href="https://postmates.com/" class="favicon">Postmates</a> | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) |
|
||||
@ -175,5 +176,6 @@ toc_title: Adopters
|
||||
| <a href="https://domclick.ru/" class="favicon">ДомКлик</a> | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) |
|
||||
| <a href="https://www.deepl.com/" class="favicon">Deepl</a> | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) |
|
||||
| <a href="https://vercel.com/" class="favicon">Vercel</a> | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 |
|
||||
| <a href="https://www.your-analytics.org/" class="favicon">YourAnalytics</a> | Web Analytics | — | — | — | [Tweet, Nov 2021](https://twitter.com/mikenikles/status/1460860140249235461) |
|
||||
|
||||
[Original article](https://clickhouse.com/docs/en/introduction/adopters/) <!--hide-->
|
||||
|
@ -107,7 +107,7 @@ Loading key from the environment variable:
|
||||
```xml
|
||||
<encryption_codecs>
|
||||
<aes_128_gcm_siv>
|
||||
<key_hex from_env="KEY"></key_hex>
|
||||
<key_hex from_env="ENVVAR"></key_hex>
|
||||
</aes_128_gcm_siv>
|
||||
</encryption_codecs>
|
||||
```
|
||||
@ -120,7 +120,7 @@ Each of these methods can be applied for multiple keys:
|
||||
<encryption_codecs>
|
||||
<aes_128_gcm_siv>
|
||||
<key_hex id="0">00112233445566778899aabbccddeeff</key_hex>
|
||||
<key_hex id="1" from_env=".."></key_hex>
|
||||
<key_hex id="1" from_env="ENVVAR"></key_hex>
|
||||
<current_key_id>1</current_key_id>
|
||||
</aes_128_gcm_siv>
|
||||
</encryption_codecs>
|
||||
|
@ -746,7 +746,7 @@ Setting fields:
|
||||
!!! info "Note"
|
||||
The `column_family` or `where` fields cannot be used together with the `query` field. And either one of the `column_family` or `query` fields must be declared.
|
||||
|
||||
### PosgreSQL {#dicts-external_dicts_dict_sources-postgresql}
|
||||
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
|
||||
|
||||
Example of settings:
|
||||
|
||||
|
@@ -320,7 +320,7 @@ Can be optimized by enabling the [optimize_functions_to_subcolumns](../../operat

**Syntax**

```sql
-mapKeys(map)
+mapValues(map)
```

**Parameters**
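As a quick illustration of the two functions named in the excerpt above (a sketch appended to this excerpt; depending on the server version, the `Map` data type may need to be enabled explicitly):

``` sql
SELECT
    mapKeys(map('a', 1, 'b', 2)) AS keys,    -- ['a', 'b']
    mapValues(map('a', 1, 'b', 2)) AS vals;  -- [1, 2]
```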
@ -252,21 +252,47 @@ CREATE TABLE codec_example
|
||||
ENGINE = MergeTree()
|
||||
```
|
||||
|
||||
<!--
|
||||
|
||||
### Encryption Codecs {#create-query-encryption-codecs}
|
||||
|
||||
These codecs don't actually compress data, but instead encrypt data on disk. These are only available when an encryption key is specified by [encryption](../../../operations/server-configuration-parameters/settings.md#server-settings-encryption) settings. Note that encryption only makes sense at the end of codec pipelines, because encrypted data usually can't be compressed in any meaningful way.
|
||||
|
||||
Encryption codecs:
|
||||
|
||||
- `Encrypted('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode. This codec uses a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence).
|
||||
- `CODEC('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode.
|
||||
- `CODEC('AES-256-GCM-SIV')` — Encrypts data with AES-256 in GCM-SIV mode.
|
||||
|
||||
These codecs use a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence, without getting its content).
|
||||
|
||||
!!! attention "Attention"
|
||||
Most engines including the "*MergeTree" family create index files on disk without applying codecs. This means plaintext will appear on disk if an encrypted column is indexed.
|
||||
|
||||
!!! attention "Attention"
|
||||
If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
|
||||
-->
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
CREATE TABLE mytable
|
||||
(
|
||||
x String Codec(AES_128_GCM_SIV)
|
||||
)
|
||||
ENGINE = MergeTree ORDER BY x;
|
||||
```
|
||||
|
||||
!!!note "Note"
|
||||
If compression needs to be applied, it must be explicitly specified. Otherwise, only encryption will be applied to data.
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
CREATE TABLE mytable
|
||||
(
|
||||
x String Codec(Delta, LZ4, AES_128_GCM_SIV)
|
||||
)
|
||||
ENGINE = MergeTree ORDER BY x;
|
||||
```
|
||||
|
||||
## Temporary Tables {#temporary-tables}
|
||||
|
||||
ClickHouse supports temporary tables which have the following characteristics:
|
||||
|
@@ -13,6 +13,8 @@ SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY|VIEW] [db.]table|view [INTO OUTFILE fi

Returns a single `String`-type ‘statement’ column, which contains a single value – the `CREATE` query used for creating the specified object.

Note that if you use this statement to get the `CREATE` query of system tables, you will get a *fake* query that only declares the table structure and cannot be used to create the table.

## SHOW DATABASES {#show-databases}

Prints a list of all databases.
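For example (illustrative; the exact output depends on the server):

``` sql
SHOW CREATE TABLE system.numbers;  -- returns the structure-only (fake) CREATE statement
SHOW DATABASES;                    -- e.g. default, system
```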
@ -34,7 +34,9 @@ DDL-запросы с базой данных `Replicated` работают по
|
||||
|
||||
В системной таблице [system.clusters](../../operations/system-tables/clusters.md) есть кластер с именем, как у реплицируемой базы, который состоит из всех реплик базы. Этот кластер обновляется автоматически при создании/удалении реплик, и его можно использовать для [Distributed](../../engines/table-engines/special/distributed.md#distributed) таблиц.
|
||||
|
||||
При создании новой реплики базы, эта реплика сама создаёт таблицы. Если реплика долго была недоступна и отстала от лога репликации — она сверяет свои локальные метаданные с актуальными метаданными в ZooKeeper, перекладывает лишние таблицы с данными в отдельную нереплицируемую базу (чтобы случайно не удалить что-нибудь лишнее), создаёт недостающие таблицы, обновляет имена таблиц, если были переименования. Данные реплицируются на уровне `ReplicatedMergeTree`, т.е. если таблица не реплицируемая, то данные реплицироваться не будут (база отвечает только за метаданные).
|
||||
При создании новой реплики базы, эта реплика сама создаёт таблицы. Если реплика долго была недоступна и отстала от лога репликации — она сверяет свои локальные метаданные с актуальными метаданными в ZooKeeper, перекладывает лишние таблицы с данными в отдельную нереплицируемую базу (чтобы случайно не удалить что-нибудь лишнее), создаёт недостающие таблицы, обновляет имена таблиц, если были переименования. Данные реплицируются на уровне `ReplicatedMergeTree`, т.е. если таблица не реплицируемая, то данные реплицироваться не будут (база отвечает только за метаданные).
|
||||
|
||||
Запросы [`ALTER TABLE ATTACH|FETCH|DROP|DROP DETACHED|DETACH PARTITION|PART`](../../sql-reference/statements/alter/partition.md) допустимы, но не реплицируются. Движок базы данных может только добавить/извлечь/удалить партицию или кусок нынешней реплики. Однако если сама таблица использует движок реплицируемой таблицы, тогда данные будут реплицированы после применения `ATTACH`.
|
||||
|
||||
## Примеры использования {#usage-example}
|
||||
|
||||
|
@ -11,7 +11,8 @@ toc_title: S3
|
||||
|
||||
``` sql
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
|
||||
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
|
||||
[SETTINGS ...]
|
||||
```
|
||||
|
||||
**Параметры движка**
|
||||
@ -24,9 +25,12 @@ ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compressi
|
||||
**Пример**
|
||||
|
||||
``` sql
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip');
|
||||
CREATE TABLE s3_engine_table (name String, value UInt32)
|
||||
ENGINE=S3('https://storage.yandexcloud.net/my-test-bucket-768/test-data.csv.gz', 'CSV', 'gzip')
|
||||
SETTINGS input_format_with_names_use_header = 0;
|
||||
|
||||
INSERT INTO s3_engine_table VALUES ('one', 1), ('two', 2), ('three', 3);
|
||||
|
||||
SELECT * FROM s3_engine_table LIMIT 2;
|
||||
```
|
||||
|
||||
@ -54,7 +58,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
|
||||
|
||||
## Символы подстановки {#wildcards-in-path}
|
||||
|
||||
Аргумент `path` может указывать на несколько файлов, используя подстановочные знаки. Для обработки файл должен существовать и соответствовать всему шаблону пути. Список файлов определяется во время выполнения запроса `SELECT` (не в момент выполнения запроса `CREATE`).
|
||||
Аргумент `path` может указывать на несколько файлов, используя символы подстановки. Для обработки файл должен существовать и соответствовать всему шаблону пути. Список файлов определяется во время выполнения запроса `SELECT` (не в момент выполнения запроса `CREATE`).
|
||||
|
||||
- `*` — заменяет любое количество любых символов, кроме `/`, включая пустую строку.
|
||||
- `?` — заменяет любые одиночные символы.
|
||||
@ -63,6 +67,52 @@ SELECT * FROM s3_engine_table LIMIT 2;
|
||||
|
||||
Конструкции с `{}` аналогичны функции [remote](../../../sql-reference/table-functions/remote.md).
|
||||
|
||||
!!! warning "Примечание"
|
||||
Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`.
|
||||
|
||||
**Пример подстановки 1**
|
||||
|
||||
Таблица содержит данные из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE big_table (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/my_folder/file-{000..999}.csv', 'CSV');
|
||||
```
|
||||
|
||||
**Пример подстановки 2**
|
||||
|
||||
Предположим, есть несколько файлов в формате CSV со следующими URL-адресами в S3:
|
||||
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/some_folder/some_file_3.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-bucket/another_folder/some_file_3.csv'
|
||||
|
||||
Существует несколько способов создать таблицу, включающую в себя все шесть файлов:
|
||||
|
||||
1. Задайте диапазон для суффиксов в названии файла:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_range (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_{1..3}', 'CSV');
|
||||
```
|
||||
|
||||
2. Таблица содержит все файлы с префиксом `some_file_` (в каталогах не должно быть других файлов с таким префиксом):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_question_mark (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/some_file_?', 'CSV');
|
||||
```
|
||||
|
||||
3. Таблица содержит все файлы в обоих каталогах (в каталогах не должно быть других файлов, соответствующих формату и схеме, описанным в запросе):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_asterisk (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-bucket/{some,another}_folder/*', 'CSV');
|
||||
```
|
||||
|
||||
## Настройки движка S3 {#s3-settings}
|
||||
|
||||
Перед выполнением запроса или в конфигурационном файле могут быть установлены следующие настройки:
|
||||
@ -108,47 +158,6 @@ SELECT * FROM s3_engine_table LIMIT 2;
|
||||
</s3>
|
||||
```
|
||||
|
||||
## Примеры использования {#usage-examples}
|
||||
|
||||
Предположим, у нас есть несколько файлов в формате CSV со следующими URL-адресами в S3:
|
||||
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/some_prefix/some_file_3.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_1.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_2.csv'
|
||||
- 'https://storage.yandexcloud.net/my-test-bucket-768/another_prefix/some_file_3.csv'
|
||||
|
||||
1. Существует несколько способов создать таблицу, включающую в себя все шесть файлов:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_range (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_{1..3}', 'CSV');
|
||||
```
|
||||
|
||||
2. Другой способ:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_question_mark (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/some_file_?', 'CSV');
|
||||
```
|
||||
|
||||
3. Таблица содержит все файлы в обоих каталогах (все файлы должны соответствовать формату и схеме, описанным в запросе):
|
||||
|
||||
``` sql
|
||||
CREATE TABLE table_with_asterisk (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/{some,another}_prefix/*', 'CSV');
|
||||
```
|
||||
|
||||
Если список файлов содержит диапазоны чисел с ведущими нулями, используйте конструкцию с фигурными скобками для каждой цифры отдельно или используйте `?`.
|
||||
|
||||
4. Создание таблицы из файлов с именами `file-000.csv`, `file-001.csv`, … , `file-999.csv`:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE big_table (name String, value UInt32)
|
||||
ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/big_prefix/file-{000..999}.csv', 'CSV');
|
||||
```
|
||||
|
||||
**Смотрите также**
|
||||
|
||||
- [Табличная функция s3](../../../sql-reference/table-functions/s3.md)
|
||||
|
@@ -9,7 +9,7 @@ toc_title: AggregatingMergeTree

Tables of the `AggregatingMergeTree` type can be used for incremental data aggregation, including for aggregating materialized views.

-The engine processes all columns of the [AggregateFunction](../../../engines/table-engines/mergetree-family/aggregatingmergetree.md) type.
+The engine processes all columns of the [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md) type.

Using `AggregatingMergeTree` is justified only when it reduces the number of rows by orders of magnitude.
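A minimal sketch of such a table and of reading the aggregate states back (hypothetical names, for illustration only):

``` sql
CREATE TABLE agg_visits
(
    day Date,
    visitors AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree()
ORDER BY day;

-- Aggregate states are written with the -State combinator and read back with -Merge.
INSERT INTO agg_visits SELECT today(), uniqState(number) FROM numbers(10);
SELECT day, uniqMerge(visitors) FROM agg_visits GROUP BY day;
```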
|
||||
|
||||
|
@ -64,9 +64,9 @@ WHERE table = 'visits'
|
||||
└───────────┴────────────────┴────────┘
|
||||
```
|
||||
|
||||
Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](#alter_manipulations-with-partitions).
|
||||
Столбец `partition` содержит имена всех партиций таблицы. Таблица `visits` из нашего примера содержит две партиции: `201901` и `201902`. Используйте значения из этого столбца в запросах [ALTER … PARTITION](../../../sql-reference/statements/alter/partition.md).
|
||||
|
||||
Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](#alter_attach-partition).
|
||||
Столбец `name` содержит названия кусков партиций. Значения из этого столбца можно использовать в запросах [ALTER ATTACH PART](../../../sql-reference/statements/alter/partition.md#alter_attach-partition).
|
||||
|
||||
Столбец `active` отображает состояние куска. `1` означает, что кусок активен; `0` – неактивен. К неактивным можно отнести куски, оставшиеся после слияния данных. Поврежденные куски также отображаются как неактивные. Неактивные куски удаляются приблизительно через 10 минут после того, как было выполнено слияние.
|
||||
|
||||
@ -82,7 +82,7 @@ WHERE table = 'visits'
|
||||
|
||||
Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены – в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные.
|
||||
|
||||
Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#misc_operations-optimize). Пример:
|
||||
Вы можете запустить внеочередное слияние данных с помощью запроса [OPTIMIZE](../../../sql-reference/statements/optimize.md). Пример:
|
||||
|
||||
``` sql
|
||||
OPTIMIZE TABLE visits PARTITION 201902;
|
||||
@ -123,11 +123,11 @@ drwxr-xr-x 2 clickhouse clickhouse 4096 Feb 1 16:48 detached
|
||||
|
||||
Директория `detached` содержит куски, отсоединенные от таблицы с помощью запроса [DETACH](../../../sql-reference/statements/alter/partition.md#alter_detach-partition). Поврежденные куски также попадают в эту директорию – они не удаляются с сервера.
|
||||
|
||||
Сервер не использует куски из директории `detached`. Вы можете в любое время добавлять, удалять, модифицировать данные в директории detached - сервер не будет об этом знать, пока вы не сделаете запрос [ATTACH](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_attach-partition).
|
||||
Сервер не использует куски из директории `detached`. Вы можете в любое время добавлять, удалять, модифицировать данные в директории detached - сервер не будет об этом знать, пока вы не сделаете запрос [ATTACH](../../../sql-reference/statements/alter/partition.md#alter_attach-partition).
|
||||
|
||||
Следует иметь в виду, что при работающем сервере нельзя вручную изменять набор кусков на файловой системе, так как сервер не будет знать об этом.
|
||||
Для нереплицируемых таблиц, вы можете это делать при остановленном сервере, однако это не рекомендуется.
|
||||
Для реплицируемых таблиц, набор кусков нельзя менять в любом случае.
|
||||
|
||||
ClickHouse позволяет производить различные манипуляции с кусками: удалять, копировать из одной таблицы в другую или создавать их резервные копии. Подробнее см. в разделе [Манипуляции с партициями и кусками](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#alter_manipulations-with-partitions).
|
||||
ClickHouse позволяет производить различные манипуляции с кусками: удалять, копировать из одной таблицы в другую или создавать их резервные копии. Подробнее см. в разделе [Манипуляции с партициями и кусками](../../../sql-reference/statements/alter/partition.md).
|
||||
|
||||
|
@ -132,7 +132,7 @@ ClickHouse может слить куски данных таким образо
[(1, 100), (2, 150)] + [(1, -100)] -> [(2, 150)]
```

When requesting data, use the [sumMap(key, value)](../../../engines/table-engines/mergetree-family/summingmergetree.md) function to aggregate `Map`.
When requesting data, use the [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md#agg_functions-summap) function to aggregate `Map`.
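A hypothetical query shape for such a table (the table name and the nested `Map.key`/`Map.value` columns are illustrative assumptions, not taken from the original page):

``` sql
SELECT id, sumMap(Map.key, Map.value) FROM summing_table GROUP BY id;
```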
For a nested data structure, you do not need to specify its columns in the tuple of columns for summation.
@ -62,7 +62,7 @@ WHERE name = 'products'
└──────────┴──────┴────────┴─────────────────┴─────────────────┴─────────────────┴───────────────┴─────────────────┘
```

In this form, the dictionary data can be obtained with the [dictGet\*](../../../engines/table-engines/special/dictionary.md#ext_dict_functions) functions.
In this form, the dictionary data can be obtained with the [dictGet\*](../../../sql-reference/functions/ext-dict-functions.md#dictget) functions.
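For instance, a single attribute can be read directly (a sketch; the `products` dictionary and its `title` attribute follow the example context above and are assumed here):

``` sql
SELECT dictGetString('products', 'title', toUInt64(1)) AS title;
```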
This representation is inconvenient when we need to obtain the data in plain form or to perform a `JOIN`. For these cases you can use the `Dictionary` engine, which exposes the dictionary data as a table.
@ -21,11 +21,11 @@ File(Format)

`Format` must be one that ClickHouse can use both in `INSERT` and in `SELECT` queries. For the full list of supported formats, see the [Formats](../../../interfaces/formats.md#formats) section.

The ClickHouse server does not let you specify the path to the file that `File` will work with. It uses the storage path defined by the [path](../../../operations/server-configuration-parameters/settings.md) parameter in the server configuration.
The ClickHouse server does not let you specify the path to the file that `File` will work with. It uses the storage path defined by the [path](../../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-path) parameter in the server configuration.

When you create a table with `File(Format)`, the ClickHouse server creates a directory with the table name in the storage and, once data has been inserted into the table, puts a `data.Format` file there.
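A minimal declaration looks like this (the table and column names are illustrative):

``` sql
CREATE TABLE file_engine_table (name String, value UInt32) ENGINE = File(TabSeparated);
```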
You can manually create the table directory in the storage, put a file there, and then add ([ATTACH](../../../engines/table-engines/special/file.md)) the table information matching the directory name on the ClickHouse server and read the data from the file.
You can manually create the table directory in the storage, put a file there, and then add ([ATTACH](../../../sql-reference/statements/attach.md#attach)) the table information matching the directory name on the ClickHouse server and read the data from the file.

!!! warning "Warning"
    Be careful with this functionality, because the ClickHouse server does not track external changes to the data. If the file is written to both by the ClickHouse server and externally at the same time, the result is unpredictable.
@ -5,7 +5,7 @@ toc_title: Join

# Join {#join}

A prepared data structure for use in [JOIN](../../../engines/table-engines/special/join.md#select-join) operations.
A prepared data structure for use in [JOIN](../../../sql-reference/statements/select/join.md#select-join) operations.

## Creating a Table {#creating-a-table}

@ -21,8 +21,8 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Engine Parameters**

- `join_strictness` – [JOIN strictness](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` – [JOIN type](../../../engines/table-engines/special/join.md#select-join-types).
- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – the key columns from the `USING` clause that the `JOIN` operation is performed with.

Enter the `join_strictness` and `join_type` parameters without quotes, for example, `Join(ANY, LEFT, col1)`. They must match the `JOIN` operation that the table will be used in. If the parameters do not match, ClickHouse does not throw an exception and may return incorrect data.
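A complete declaration could look like this (a sketch; the table and column names are illustrative):

``` sql
CREATE TABLE id_val_join (`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id);
```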
@ -42,7 +42,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
The main uses of `Join` tables:

- Use on the right side of a `JOIN` clause.
- Retrieving data from the table in the same way as from a dictionary, using the [joinGet](../../../engines/table-engines/special/join.md#joinget) function (see the sketch after this list).
- Retrieving data from the table in the same way as from a dictionary, using the [joinGet](../../../sql-reference/functions/other-functions.md#joinget) function (see the sketch after this list).
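A sketch of that second use case, assuming the hypothetical `id_val_join` table declared above:

``` sql
SELECT joinGet('id_val_join', 'val', toUInt32(1));
```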
### Deleting Data {#deleting-data}
@ -163,8 +163,8 @@ SELECT * FROM nestedt FORMAT TSV
## TabSeparatedWithNames {#tabseparatedwithnames}

Differs from the `TabSeparated` format in that the column names are written in the first row.
During parsing, the first row is completely ignored. You cannot use the column names to determine their position or to check their correctness.
(Support for parsing the header row may be added in the future.)

During parsing, the first row must contain the column names. You can use the column names to determine their position or to check their correctness.

This format is also available under the name `TSVWithNames`.
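A quick way to see the header row that this format produces (an illustrative sketch, not part of the original page):

``` sql
SELECT 1 AS x, 'hello' AS s FORMAT TabSeparatedWithNames;
```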
|
||||
|
||||
|
@ -31,6 +31,8 @@ toc_title: "Клиентские библиотеки от сторонних р
|
||||
- NodeJs
    - [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
    - [node-clickhouse](https://github.com/apla/node-clickhouse)
    - [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
    - [clickhouse-client](https://github.com/depyronick/clickhouse-client)
- Perl
    - [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
    - [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
38
docs/ru/interfaces/third-party/gui.md
vendored
@ -69,6 +69,14 @@ toc_title: "Визуальные интерфейсы от сторонних р
- Database explorer.
- Visualization tools that let you present the data in various forms.

### Grafana {#grafana}

[Grafana](https://grafana.com/grafana/plugins/vertamedia-clickhouse-datasource) is a platform for monitoring and visualization.

"With Grafana you can query, visualize, alert on and understand your metrics no matter where they are stored. Create, explore, and share dashboards with your team and foster a data driven culture. Trusted and loved by the community" — grafana.com.

The ClickHouse data source plugin supports ClickHouse as a backend database.

### DBeaver {#dbeaver}

[DBeaver](https://dbeaver.io/) is a universal desktop database client with ClickHouse support.
@ -109,6 +117,36 @@ toc_title: "Визуальные интерфейсы от сторонних р

[MindsDB](https://mindsdb.com/) is an open-source product that implements an artificial intelligence (AI) layer for various DBMSs, including ClickHouse. MindsDB simplifies the creation, training, and deployment of modern machine learning models. The MindsDB Studio graphical user interface lets you train new models on data from the database, interpret the predictions made by the models, identify potential data errors, and visualize and evaluate model reliability with the Explainable AI feature, so that you can adapt and tune your machine learning models faster.

### DBM {#dbm}

[DBM](https://dbm.incubator.edurt.io/) is a visual management tool for ClickHouse!

Key features:

- Support for query history (pagination, clear all, etc.)
- Support for running selected SQL clauses
- Support for terminating a query
- Support for table management (metadata, delete, preview)
- Support for database management (delete, create)
- Support for custom queries
- Support for managing multiple data sources (connection test, monitoring)
- Support for monitoring (processor, connection, query)
- Support for data migration

### Bytebase {#bytebase}

[Bytebase](https://bytebase.com) is a web-based, open-source schema change and version control tool for teams. It supports various databases, including ClickHouse.

Key features:

- Schema review between developers and DBAs.
- Database-as-Code: store the database schema in a version control system such as GitLab and trigger deployment on commit.
- Streamlined deployment with per-environment policies.
- Full migration history.
- Schema drift detection.
- Backup and restore.
- Role-based access control.

## Commercial {#commercial}

### DataGrip {#datagrip}
@ -107,5 +107,5 @@ SELECT * FROM mysql('localhost:3306', 'test', 'test', 'bayonet', '123');
**See Also**

- [The ‘MySQL’ table engine](../../sql-reference/table-functions/mysql.md)
- [Using MySQL as a source for an external dictionary](../../sql-reference/table-functions/mysql.md#dicts-external_dicts_dict_sources-mysql)
- [Using MySQL as a source for an external dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-mysql)
|
||||
|
||||
|
@ -1,3 +1,3 @@
# Settings {#set}
# Set {#set}

Can be used on the right side of an `IN` expression.
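For example (a minimal sketch; the `visits` table and `user_id` column are illustrative), a `Set` table is filled with `INSERT` and then used on the right side of `IN`:

``` sql
CREATE TABLE userid_set (user_id UInt64) ENGINE = Set;
INSERT INTO userid_set VALUES (1), (2);
SELECT count() FROM visits WHERE user_id IN userid_set;
```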
|
||||
|
@ -12,6 +12,7 @@ SHOW CREATE [TEMPORARY] [TABLE|DICTIONARY] [db.]table [INTO OUTFILE filename] [F
```
Returns a single `String`-type column named ‘statement’, which contains a single value: the `CREATE` statement used to create the specified object.

Note that if you use this query to get the `CREATE` statement of a system table, you get an artificial statement that only shows the table structure and cannot actually be used to create the table.
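For instance (a sketch; any system table would do):

``` sql
SHOW CREATE TABLE system.numbers;
```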
## SHOW DATABASES {#show-databases}
@ -1003,7 +1003,6 @@ void Client::addOptions(OptionsDescription & options_description)
        ("password", po::value<std::string>()->implicit_value("\n", ""), "password")
        ("ask-password", "ask-password")
        ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
        ("pager", po::value<std::string>(), "pager")
        ("testmode,T", "enable test hints in comments")

        ("max_client_network_bandwidth", po::value<int>(), "the maximum speed of data exchange over the network for the client in bytes per second.")
@ -1104,8 +1103,6 @@ void Client::processOptions(const OptionsDescription & options_description,
        config().setString("host", options["host"].as<std::string>());
    if (options.count("interleave-queries-file"))
        interleave_queries_files = options["interleave-queries-file"].as<std::vector<std::string>>();
    if (options.count("pager"))
        config().setString("pager", options["pager"].as<std::string>());
    if (options.count("port") && !options["port"].defaulted())
        config().setInt("port", options["port"].as<int>());
    if (options.count("secure"))
@ -744,8 +744,8 @@ std::shared_ptr<ASTCreateQuery> rewriteCreateQueryStorage(const ASTPtr & create_
    if (create.storage == nullptr || new_storage_ast == nullptr)
        throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR);

    res->database = new_table.first;
    res->table = new_table.second;
    res->setDatabase(new_table.first);
    res->setTable(new_table.second);

    res->children.clear();
    res->set(res->columns_list, create.columns_list->clone());
@ -1659,7 +1659,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast)
{
    const auto & create = create_ast->as<ASTCreateQuery &>();
    dropLocalTableIfExists({create.database, create.table});
    dropLocalTableIfExists({create.getDatabase(), create.getTable()});

    auto create_context = Context::createCopy(getContext());

@ -1671,8 +1671,8 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na
{
    auto drop_ast = std::make_shared<ASTDropQuery>();
    drop_ast->if_exists = true;
    drop_ast->database = table_name.first;
    drop_ast->table = table_name.second;
    drop_ast->setDatabase(table_name.first);
    drop_ast->setTable(table_name.second);

    auto drop_context = Context::createCopy(getContext());

@ -36,6 +36,7 @@
#include <Dictionaries/registerDictionaries.h>
#include <Disks/registerDisks.h>
#include <Formats/registerFormats.h>
#include <boost/algorithm/string/replace.hpp>
#include <boost/program_options/options_description.hpp>
#include <base/argsToConfig.h>
#include <filesystem>
@ -39,9 +39,6 @@ namespace ErrorCodes
    extern const int INCORRECT_ACCESS_ENTITY_DEFINITION;
}

using EntityType = IAccessStorage::EntityType;
using EntityTypeInfo = IAccessStorage::EntityTypeInfo;

namespace
{
    /// Special parser for the 'ATTACH access entity' queries.
@ -80,7 +77,7 @@ String serializeAccessEntity(const IAccessEntity & entity)
    /// Build list of ATTACH queries.
    ASTs queries;
    queries.push_back(InterpreterShowCreateAccessEntityQuery::getAttachQuery(entity));
    if ((entity.getType() == EntityType::USER) || (entity.getType() == EntityType::ROLE))
    if ((entity.getType() == AccessEntityType::USER) || (entity.getType() == AccessEntityType::ROLE))
        boost::range::push_back(queries, InterpreterShowGrantsQuery::getAttachGrantQueries(entity));

    /// Serialize the list of ATTACH queries to a string.
@ -1,9 +1,12 @@
#pragma once

#include <Access/IAccessEntity.h>
#include <base/types.h>
#include <memory>

namespace DB
{
struct IAccessEntity;
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;

String serializeAccessEntity(const IAccessEntity & entity);

86
src/Access/Common/AccessEntityType.cpp
Normal file
@ -0,0 +1,86 @@
#include <Access/Common/AccessEntityType.h>
#include <Common/Exception.h>
#include <Common/quoteString.h>
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/replace.hpp>


namespace DB
{
namespace ErrorCodes
{
    extern const int UNKNOWN_USER;
    extern const int UNKNOWN_ROLE;
    extern const int UNKNOWN_ROW_POLICY;
    extern const int UNKNOWN_QUOTA;
    extern const int THERE_IS_NO_PROFILE;
    extern const int LOGICAL_ERROR;
}


String toString(AccessEntityType type)
{
    return AccessEntityTypeInfo::get(type).name;
}

String AccessEntityTypeInfo::formatEntityNameWithType(const String & entity_name) const
{
    String msg = name_for_output_with_entity_name;
    msg += " ";
    msg += backQuote(entity_name);
    return msg;
}

const AccessEntityTypeInfo & AccessEntityTypeInfo::get(AccessEntityType type_)
{
    static constexpr auto make_info = [](const char * raw_name_, const char * plural_raw_name_, char unique_char_, int not_found_error_code_)
    {
        String init_names[2] = {raw_name_, plural_raw_name_};
        String init_aliases[2];
        for (size_t i = 0; i != std::size(init_names); ++i)
        {
            String & init_name = init_names[i];
            String & init_alias = init_aliases[i];
            boost::to_upper(init_name);
            boost::replace_all(init_name, "_", " ");
            if (auto underscore_pos = init_name.find_first_of(' '); underscore_pos != String::npos)
                init_alias = init_name.substr(underscore_pos + 1);
        }
        String init_name_for_output_with_entity_name = init_names[0];
        boost::to_lower(init_name_for_output_with_entity_name);
        return AccessEntityTypeInfo{raw_name_, plural_raw_name_, std::move(init_names[0]), std::move(init_aliases[0]), std::move(init_names[1]), std::move(init_aliases[1]), std::move(init_name_for_output_with_entity_name), unique_char_, not_found_error_code_};
    };

    switch (type_)
    {
        case AccessEntityType::USER:
        {
            static const auto info = make_info("USER", "USERS", 'U', ErrorCodes::UNKNOWN_USER);
            return info;
        }
        case AccessEntityType::ROLE:
        {
            static const auto info = make_info("ROLE", "ROLES", 'R', ErrorCodes::UNKNOWN_ROLE);
            return info;
        }
        case AccessEntityType::SETTINGS_PROFILE:
        {
            static const auto info = make_info("SETTINGS_PROFILE", "SETTINGS_PROFILES", 'S', ErrorCodes::THERE_IS_NO_PROFILE);
            return info;
        }
        case AccessEntityType::ROW_POLICY:
        {
            static const auto info = make_info("ROW_POLICY", "ROW_POLICIES", 'P', ErrorCodes::UNKNOWN_ROW_POLICY);
            return info;
        }
        case AccessEntityType::QUOTA:
        {
            static const auto info = make_info("QUOTA", "QUOTAS", 'Q', ErrorCodes::UNKNOWN_QUOTA);
            return info;
        }
        case AccessEntityType::MAX: break;
    }
    throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
}

}
40
src/Access/Common/AccessEntityType.h
Normal file
@ -0,0 +1,40 @@
#pragma once

#include <Core/Types.h>


namespace DB
{

/// Represents the type of an access entity (see the IAccessEntity class).
enum class AccessEntityType
{
    USER,
    ROLE,
    SETTINGS_PROFILE,
    ROW_POLICY,
    QUOTA,

    MAX,
};

String toString(AccessEntityType type);

struct AccessEntityTypeInfo
{
    const char * const raw_name;
    const char * const plural_raw_name;
    const String name; /// Uppercased with spaces instead of underscores, e.g. "SETTINGS PROFILE".
    const String alias; /// Alias of the keyword or empty string, e.g. "PROFILE".
    const String plural_name; /// Uppercased with spaces plural name, e.g. "SETTINGS PROFILES".
    const String plural_alias; /// Uppercased with spaces plural name alias, e.g. "PROFILES".
    const String name_for_output_with_entity_name; /// Lowercased with spaces instead of underscores, e.g. "settings profile".
    const char unique_char; /// Unique character for this type. E.g. 'P' for SETTINGS_PROFILE.
    const int not_found_error_code;

    String formatEntityNameWithType(const String & entity_name) const;

    static const AccessEntityTypeInfo & get(AccessEntityType type_);
};

}
189
src/Access/Common/QuotaDefs.cpp
Normal file
189
src/Access/Common/QuotaDefs.cpp
Normal file
@ -0,0 +1,189 @@
|
||||
#include <Access/Common/QuotaDefs.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
#include <base/range.h>
|
||||
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
String toString(QuotaType type)
|
||||
{
|
||||
return QuotaTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
String QuotaTypeInfo::valueToString(QuotaValue value) const
|
||||
{
|
||||
if (!(value % output_denominator))
|
||||
return std::to_string(value / output_denominator);
|
||||
else
|
||||
return boost::lexical_cast<std::string>(static_cast<double>(value) / output_denominator);
|
||||
}
|
||||
|
||||
QuotaValue QuotaTypeInfo::stringToValue(const String & str) const
|
||||
{
|
||||
if (output_denominator == 1)
|
||||
return static_cast<QuotaValue>(std::strtoul(str.c_str(), nullptr, 10));
|
||||
else
|
||||
return static_cast<QuotaValue>(std::strtod(str.c_str(), nullptr) * output_denominator);
|
||||
}
|
||||
|
||||
String QuotaTypeInfo::valueToStringWithName(QuotaValue value) const
|
||||
{
|
||||
String res = name;
|
||||
res += " = ";
|
||||
res += valueToString(value);
|
||||
return res;
|
||||
}
|
||||
|
||||
const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_, UInt64 output_denominator_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
String init_keyword = raw_name_;
|
||||
boost::replace_all(init_keyword, "_", " ");
|
||||
bool init_output_as_float = (output_denominator_ != 1);
|
||||
return QuotaTypeInfo{raw_name_, std::move(init_name), std::move(init_keyword), init_output_as_float, output_denominator_};
|
||||
};
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case QuotaType::QUERIES:
|
||||
{
|
||||
static const auto info = make_info("QUERIES", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::QUERY_SELECTS:
|
||||
{
|
||||
static const auto info = make_info("QUERY_SELECTS", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::QUERY_INSERTS:
|
||||
{
|
||||
static const auto info = make_info("QUERY_INSERTS", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::ERRORS:
|
||||
{
|
||||
static const auto info = make_info("ERRORS", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::RESULT_ROWS:
|
||||
{
|
||||
static const auto info = make_info("RESULT_ROWS", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::RESULT_BYTES:
|
||||
{
|
||||
static const auto info = make_info("RESULT_BYTES", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::READ_ROWS:
|
||||
{
|
||||
static const auto info = make_info("READ_ROWS", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::READ_BYTES:
|
||||
{
|
||||
static const auto info = make_info("READ_BYTES", 1);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::EXECUTION_TIME:
|
||||
{
|
||||
static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */);
|
||||
return info;
|
||||
}
|
||||
case QuotaType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
String toString(QuotaKeyType type)
|
||||
{
|
||||
return QuotaKeyTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
const QuotaKeyTypeInfo & QuotaKeyTypeInfo::get(QuotaKeyType type)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
std::vector<QuotaKeyType> init_base_types;
|
||||
String replaced = boost::algorithm::replace_all_copy(init_name, "_or_", "|");
|
||||
Strings tokens;
|
||||
boost::algorithm::split(tokens, replaced, boost::is_any_of("|"));
|
||||
if (tokens.size() > 1)
|
||||
{
|
||||
for (const auto & token : tokens)
|
||||
{
|
||||
for (auto kt : collections::range(QuotaKeyType::MAX))
|
||||
{
|
||||
if (QuotaKeyTypeInfo::get(kt).name == token)
|
||||
{
|
||||
init_base_types.push_back(kt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return QuotaKeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)};
|
||||
};
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case QuotaKeyType::NONE:
|
||||
{
|
||||
static const auto info = make_info("NONE");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::USER_NAME:
|
||||
{
|
||||
static const auto info = make_info("USER_NAME");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::FORWARDED_IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("FORWARDED_IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::CLIENT_KEY:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY_OR_USER_NAME");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::CLIENT_KEY_OR_IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY_OR_IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case QuotaKeyType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
}
|
67
src/Access/Common/QuotaDefs.h
Normal file
67
src/Access/Common/QuotaDefs.h
Normal file
@ -0,0 +1,67 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/Types.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
/// We use UInt64 to count used resources.
|
||||
using QuotaValue = UInt64;
|
||||
|
||||
/// Kinds of resource what we wish to quota.
|
||||
enum class QuotaType
|
||||
{
|
||||
QUERIES, /// Number of queries.
|
||||
QUERY_SELECTS, /// Number of select queries.
|
||||
QUERY_INSERTS, /// Number of inserts queries.
|
||||
ERRORS, /// Number of queries with exceptions.
|
||||
RESULT_ROWS, /// Number of rows returned as result.
|
||||
RESULT_BYTES, /// Number of bytes returned as result.
|
||||
READ_ROWS, /// Number of rows read from tables.
|
||||
READ_BYTES, /// Number of bytes read from tables.
|
||||
EXECUTION_TIME, /// Total amount of query execution time in nanoseconds.
|
||||
|
||||
MAX
|
||||
};
|
||||
|
||||
String toString(QuotaType type);
|
||||
|
||||
struct QuotaTypeInfo
|
||||
{
|
||||
const char * const raw_name = "";
|
||||
const String name; /// Lowercased with underscores, e.g. "result_rows".
|
||||
const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
|
||||
const bool output_as_float = false;
|
||||
const UInt64 output_denominator = 1;
|
||||
String valueToString(QuotaValue value) const;
|
||||
QuotaValue stringToValue(const String & str) const;
|
||||
String valueToStringWithName(QuotaValue value) const;
|
||||
static const QuotaTypeInfo & get(QuotaType type);
|
||||
};
|
||||
|
||||
/// Key to share quota consumption.
|
||||
/// Users with the same key share the same amount of resource.
|
||||
enum class QuotaKeyType
|
||||
{
|
||||
NONE, /// All users share the same quota.
|
||||
USER_NAME, /// Connections with the same user name share the same quota.
|
||||
IP_ADDRESS, /// Connections from the same IP share the same quota.
|
||||
FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address.
|
||||
CLIENT_KEY, /// Client should explicitly supply a key to use.
|
||||
CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key.
|
||||
CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key.
|
||||
|
||||
MAX
|
||||
};
|
||||
|
||||
String toString(QuotaKeyType type);
|
||||
|
||||
struct QuotaKeyTypeInfo
|
||||
{
|
||||
const char * const raw_name;
|
||||
const String name; /// Lowercased with underscores, e.g. "client_key".
|
||||
const std::vector<QuotaKeyType> base_types; /// For combined types keeps base types, e.g. for CLIENT_KEY_OR_USER_NAME it keeps [KeyType::CLIENT_KEY, KeyAccessEntityType::USER_NAME].
|
||||
static const QuotaKeyTypeInfo & get(QuotaKeyType type);
|
||||
};
|
||||
|
||||
}
|
81
src/Access/Common/RowPolicyDefs.cpp
Normal file
81
src/Access/Common/RowPolicyDefs.cpp
Normal file
@ -0,0 +1,81 @@
|
||||
#include <Access/Common/RowPolicyDefs.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
String RowPolicyName::toString() const
|
||||
{
|
||||
String name;
|
||||
name.reserve(database.length() + table_name.length() + short_name.length() + 6);
|
||||
name += backQuoteIfNeed(short_name);
|
||||
name += " ON ";
|
||||
if (!database.empty())
|
||||
{
|
||||
name += backQuoteIfNeed(database);
|
||||
name += '.';
|
||||
}
|
||||
name += backQuoteIfNeed(table_name);
|
||||
return name;
|
||||
}
|
||||
|
||||
String toString(RowPolicyFilterType type)
|
||||
{
|
||||
return RowPolicyFilterTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType type_)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
size_t underscore_pos = init_name.find('_');
|
||||
String init_command = init_name.substr(0, underscore_pos);
|
||||
boost::to_upper(init_command);
|
||||
bool init_is_check = (std::string_view{init_name}.substr(underscore_pos + 1) == "check");
|
||||
return RowPolicyFilterTypeInfo{raw_name_, std::move(init_name), std::move(init_command), init_is_check};
|
||||
};
|
||||
|
||||
switch (type_)
|
||||
{
|
||||
case RowPolicyFilterType::SELECT_FILTER:
|
||||
{
|
||||
static const auto info = make_info("SELECT_FILTER");
|
||||
return info;
|
||||
}
|
||||
#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet.
|
||||
case RowPolicyFilterType::INSERT_CHECK:
|
||||
{
|
||||
static const auto info = make_info("INSERT_CHECK");
|
||||
return info;
|
||||
}
|
||||
case RowPolicyFilterType::UPDATE_FILTER:
|
||||
{
|
||||
static const auto info = make_info("UPDATE_FILTER");
|
||||
return info;
|
||||
}
|
||||
case RowPolicyFilterType::UPDATE_CHECK:
|
||||
{
|
||||
static const auto info = make_info("UPDATE_CHECK");
|
||||
return info;
|
||||
}
|
||||
case RowPolicyFilterType::DELETE_FILTER:
|
||||
{
|
||||
static const auto info = make_info("DELETE_FILTER");
|
||||
return info;
|
||||
}
|
||||
#endif
|
||||
case RowPolicyFilterType::MAX: break;
|
||||
}
|
||||
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
}
|
57
src/Access/Common/RowPolicyDefs.h
Normal file
57
src/Access/Common/RowPolicyDefs.h
Normal file
@ -0,0 +1,57 @@
|
||||
#pragma once
|
||||
|
||||
#include <Core/Types.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// Represents the full name of a row policy, e.g. "myfilter ON mydb.mytable".
|
||||
struct RowPolicyName
|
||||
{
|
||||
String short_name;
|
||||
String database;
|
||||
String table_name;
|
||||
|
||||
bool empty() const { return short_name.empty(); }
|
||||
String toString() const;
|
||||
auto toTuple() const { return std::tie(short_name, database, table_name); }
|
||||
friend bool operator ==(const RowPolicyName & left, const RowPolicyName & right) { return left.toTuple() == right.toTuple(); }
|
||||
friend bool operator !=(const RowPolicyName & left, const RowPolicyName & right) { return left.toTuple() != right.toTuple(); }
|
||||
};
|
||||
|
||||
|
||||
/// Types of the filters of row policies.
|
||||
/// Currently only RowPolicyFilterType::SELECT is supported.
|
||||
enum class RowPolicyFilterType
|
||||
{
|
||||
/// Filter is a SQL conditional expression used to figure out which rows should be visible
|
||||
/// for user or available for modification. If the expression returns NULL or false for some rows
|
||||
/// those rows are silently suppressed.
|
||||
SELECT_FILTER,
|
||||
|
||||
#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet.
|
||||
/// Check is a SQL condition expression used to check whether a row can be written into
|
||||
/// the table. If the expression returns NULL or false an exception is thrown.
|
||||
/// If a conditional expression here is empty it means no filtering is applied.
|
||||
INSERT_CHECK,
|
||||
UPDATE_FILTER,
|
||||
UPDATE_CHECK,
|
||||
DELETE_FILTER,
|
||||
#endif
|
||||
|
||||
MAX
|
||||
};
|
||||
|
||||
String toString(RowPolicyFilterType type);
|
||||
|
||||
struct RowPolicyFilterTypeInfo
|
||||
{
|
||||
const char * const raw_name;
|
||||
const String name; /// Lowercased with underscores, e.g. "select_filter".
|
||||
const String command; /// Uppercased without last word, e.g. "SELECT".
|
||||
const bool is_check; /// E.g. false for SELECT_FILTER.
|
||||
static const RowPolicyFilterTypeInfo & get(RowPolicyFilterType type);
|
||||
};
|
||||
|
||||
}
|
@ -269,11 +269,11 @@ std::shared_ptr<const EnabledRowPolicies> ContextAccess::getEnabledRowPolicies()
|
||||
return no_row_policies;
|
||||
}
|
||||
|
||||
ASTPtr ContextAccess::getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition) const
|
||||
ASTPtr ContextAccess::getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
if (enabled_row_policies)
|
||||
return enabled_row_policies->getCondition(database, table_name, index, extra_condition);
|
||||
return enabled_row_policies->getFilter(database, table_name, filter_type, combine_with_expr);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
@ -1,13 +1,14 @@
|
||||
#pragma once
|
||||
|
||||
#include <Access/AccessRights.h>
|
||||
#include <Access/RowPolicy.h>
|
||||
#include <Access/Common/RowPolicyDefs.h>
|
||||
#include <Interpreters/ClientInfo.h>
|
||||
#include <Core/UUID.h>
|
||||
#include <base/scope_guard.h>
|
||||
#include <base/shared_ptr_helper.h>
|
||||
#include <boost/container/flat_set.hpp>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <unordered_map>
|
||||
|
||||
|
||||
@ -81,7 +82,7 @@ public:
|
||||
|
||||
/// Returns the row policy filter for a specified table.
|
||||
/// The function returns nullptr if there is no filter to apply.
|
||||
ASTPtr getRowPolicyCondition(const String & database, const String & table_name, RowPolicy::ConditionType index, const ASTPtr & extra_condition = nullptr) const;
|
||||
ASTPtr getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr = nullptr) const;
|
||||
|
||||
/// Returns the quota to track resource consumption.
|
||||
std::shared_ptr<const EnabledQuota> getQuota() const;
|
||||
|
@ -1,23 +1,16 @@
|
||||
#include <Access/DiskAccessStorage.h>
|
||||
#include <Access/AccessEntityIO.h>
|
||||
#include <Access/User.h>
|
||||
#include <Access/Role.h>
|
||||
#include <Access/RowPolicy.h>
|
||||
#include <Access/Quota.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <Interpreters/Access/InterpreterCreateUserQuery.h>
|
||||
#include <Interpreters/Access/InterpreterShowGrantsQuery.h>
|
||||
#include <Parsers/Access/ASTCreateUserQuery.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <base/logger_useful.h>
|
||||
#include <Poco/JSON/JSON.h>
|
||||
#include <Poco/JSON/Object.h>
|
||||
#include <Poco/JSON/Stringifier.h>
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <filesystem>
|
||||
#include <fstream>
|
||||
@ -34,10 +27,6 @@ namespace ErrorCodes
|
||||
|
||||
namespace
|
||||
{
|
||||
using EntityType = IAccessStorage::EntityType;
|
||||
using EntityTypeInfo = IAccessStorage::EntityTypeInfo;
|
||||
|
||||
|
||||
/// Reads a file containing ATTACH queries and then parses it to build an access entity.
|
||||
AccessEntityPtr readEntityFile(const String & file_path)
|
||||
{
|
||||
@ -144,9 +133,9 @@ namespace
|
||||
|
||||
|
||||
/// Calculates the path for storing a map of name of access entity to UUID for access entities of some type.
|
||||
String getListFilePath(const String & directory_path, EntityType type)
|
||||
String getListFilePath(const String & directory_path, AccessEntityType type)
|
||||
{
|
||||
String file_name = EntityTypeInfo::get(type).plural_raw_name;
|
||||
String file_name = AccessEntityTypeInfo::get(type).plural_raw_name;
|
||||
boost::to_lower(file_name);
|
||||
return directory_path + file_name + ".list";
|
||||
}
|
||||
@ -238,7 +227,7 @@ bool DiskAccessStorage::isPathEqual(const String & directory_path_) const
|
||||
void DiskAccessStorage::clear()
|
||||
{
|
||||
entries_by_id.clear();
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
entries_by_name_and_type[static_cast<size_t>(type)].clear();
|
||||
}
|
||||
|
||||
@ -248,7 +237,7 @@ bool DiskAccessStorage::readLists()
|
||||
clear();
|
||||
|
||||
bool ok = true;
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
{
|
||||
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
auto file_path = getListFilePath(directory_path, type);
|
||||
@ -321,7 +310,7 @@ bool DiskAccessStorage::writeLists()
|
||||
}
|
||||
|
||||
|
||||
void DiskAccessStorage::scheduleWriteLists(EntityType type)
|
||||
void DiskAccessStorage::scheduleWriteLists(AccessEntityType type)
|
||||
{
|
||||
if (failed_to_write_lists)
|
||||
return; /// We don't try to write list files after the first fail.
|
||||
@ -407,14 +396,14 @@ bool DiskAccessStorage::rebuildLists()
|
||||
entries_by_name[entry.name] = &entry;
|
||||
}
|
||||
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
types_of_lists_to_write.insert(type);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> DiskAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> DiskAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
@ -426,7 +415,7 @@ std::optional<UUID> DiskAccessStorage::findImpl(EntityType type, const String &
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> DiskAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> DiskAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
@ -489,7 +478,7 @@ UUID DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool repl
|
||||
void DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications)
|
||||
{
|
||||
const String & name = new_entity->getName();
|
||||
EntityType type = new_entity->getType();
|
||||
AccessEntityType type = new_entity->getType();
|
||||
|
||||
if (readonly)
|
||||
throwReadonlyCannotInsert(type, name);
|
||||
@ -543,7 +532,7 @@ void DiskAccessStorage::removeNoLock(const UUID & id, Notifications & notificati
|
||||
throwNotFound(id);
|
||||
|
||||
Entry & entry = it->second;
|
||||
EntityType type = entry.type;
|
||||
AccessEntityType type = entry.type;
|
||||
|
||||
if (readonly)
|
||||
throwReadonlyCannotRemove(type, entry.name);
|
||||
@ -591,7 +580,7 @@ void DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_
|
||||
|
||||
const String & new_name = new_entity->getName();
|
||||
const String & old_name = old_entity->getName();
|
||||
const EntityType type = entry.type;
|
||||
const AccessEntityType type = entry.type;
|
||||
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
|
||||
bool name_changed = (new_name != old_name);
|
||||
@ -671,7 +660,7 @@ scope_guard DiskAccessStorage::subscribeForChangesImpl(const UUID & id, const On
|
||||
};
|
||||
}
|
||||
|
||||
scope_guard DiskAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard DiskAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
@ -698,7 +687,7 @@ bool DiskAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
return false;
|
||||
}
|
||||
|
||||
bool DiskAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool DiskAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
|
@ -27,8 +27,8 @@ public:
|
||||
bool isReadOnly() const { return readonly; }
|
||||
|
||||
private:
|
||||
std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
@ -37,14 +37,14 @@ private:
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
bool hasSubscriptionImpl(EntityType type) const override;
|
||||
bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
|
||||
void clear();
|
||||
bool readLists();
|
||||
bool writeLists();
|
||||
void scheduleWriteLists(EntityType type);
|
||||
void scheduleWriteLists(AccessEntityType type);
|
||||
bool rebuildLists();
|
||||
|
||||
void listsWritingThreadFunc();
|
||||
@ -63,7 +63,7 @@ private:
|
||||
{
|
||||
UUID id;
|
||||
String name;
|
||||
EntityType type;
|
||||
AccessEntityType type;
|
||||
mutable AccessEntityPtr entity; /// may be nullptr, if the entity hasn't been loaded yet.
|
||||
mutable std::list<OnChangedHandler> handlers_by_id;
|
||||
};
|
||||
@ -73,13 +73,13 @@ private:
|
||||
String directory_path;
|
||||
std::atomic<bool> readonly;
|
||||
std::unordered_map<UUID, Entry> entries_by_id;
|
||||
std::unordered_map<std::string_view, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)];
|
||||
boost::container::flat_set<EntityType> types_of_lists_to_write;
|
||||
std::unordered_map<std::string_view, Entry *> entries_by_name_and_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
boost::container::flat_set<AccessEntityType> types_of_lists_to_write;
|
||||
bool failed_to_write_lists = false; /// Whether writing of the list files has been failed since the recent restart of the server.
|
||||
ThreadFromGlobalPool lists_writing_thread; /// List files are written in a separate thread.
|
||||
std::condition_variable lists_writing_thread_should_exit; /// Signals `lists_writing_thread` to exit.
|
||||
bool lists_writing_thread_is_waiting = false;
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
mutable std::mutex mutex;
|
||||
};
|
||||
}
|
||||
|
@ -20,16 +20,16 @@ struct EnabledQuota::Impl
|
||||
[[noreturn]] static void throwQuotaExceed(
|
||||
const String & user_name,
|
||||
const String & quota_name,
|
||||
ResourceType resource_type,
|
||||
ResourceAmount used,
|
||||
ResourceAmount max,
|
||||
QuotaType quota_type,
|
||||
QuotaValue used,
|
||||
QuotaValue max,
|
||||
std::chrono::seconds duration,
|
||||
std::chrono::system_clock::time_point end_of_interval)
|
||||
{
|
||||
const auto & type_info = Quota::ResourceTypeInfo::get(resource_type);
|
||||
const auto & type_info = QuotaTypeInfo::get(quota_type);
|
||||
throw Exception(
|
||||
"Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
|
||||
+ type_info.outputWithAmount(used) + "/" + type_info.amountToString(max) + ". "
|
||||
+ type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
|
||||
+ "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
|
||||
ErrorCodes::QUOTA_EXPIRED);
|
||||
}
|
||||
@ -52,9 +52,7 @@ struct EnabledQuota::Impl
|
||||
return end;
|
||||
}
|
||||
|
||||
/// We reset counters only if the interval's end has been calculated before.
|
||||
/// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
|
||||
bool need_reset_counters = (end_loaded.count() != 0);
|
||||
bool need_reset_counters = false;
|
||||
|
||||
do
|
||||
{
|
||||
@ -66,7 +64,12 @@ struct EnabledQuota::Impl
|
||||
UInt64 n = static_cast<UInt64>((current_time - end + duration) / duration);
|
||||
end = end + duration * n;
|
||||
if (end_of_interval.compare_exchange_strong(end_loaded, end.time_since_epoch()))
|
||||
{
|
||||
/// We reset counters only if the interval's end has been calculated before.
|
||||
/// If it hasn't we just calculate the interval's end for the first time and don't reset counters yet.
|
||||
need_reset_counters = (end_loaded.count() != 0);
|
||||
break;
|
||||
}
|
||||
end = std::chrono::system_clock::time_point{end_loaded};
|
||||
}
|
||||
while (current_time >= end);
|
||||
@ -83,15 +86,16 @@ struct EnabledQuota::Impl
|
||||
static void used(
|
||||
const String & user_name,
|
||||
const Intervals & intervals,
|
||||
ResourceType resource_type,
|
||||
ResourceAmount amount,
|
||||
QuotaType quota_type,
|
||||
QuotaValue value,
|
||||
std::chrono::system_clock::time_point current_time,
|
||||
bool check_exceeded)
|
||||
{
|
||||
for (const auto & interval : intervals.intervals)
|
||||
{
|
||||
ResourceAmount used = (interval.used[resource_type] += amount);
|
||||
ResourceAmount max = interval.max[resource_type];
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
QuotaValue used = (interval.used[quota_type_i] += value);
|
||||
QuotaValue max = interval.max[quota_type_i];
|
||||
if (!max)
|
||||
continue;
|
||||
if (used > max)
|
||||
@ -100,12 +104,12 @@ struct EnabledQuota::Impl
|
||||
auto end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
|
||||
if (counters_were_reset)
|
||||
{
|
||||
used = (interval.used[resource_type] += amount);
|
||||
used = (interval.used[quota_type_i] += value);
|
||||
if ((used > max) && check_exceeded)
|
||||
throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
else if (check_exceeded)
|
||||
throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -113,13 +117,14 @@ struct EnabledQuota::Impl
|
||||
static void checkExceeded(
|
||||
const String & user_name,
|
||||
const Intervals & intervals,
|
||||
ResourceType resource_type,
|
||||
QuotaType quota_type,
|
||||
std::chrono::system_clock::time_point current_time)
|
||||
{
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
for (const auto & interval : intervals.intervals)
|
||||
{
|
||||
ResourceAmount used = interval.used[resource_type];
|
||||
ResourceAmount max = interval.max[resource_type];
|
||||
QuotaValue used = interval.used[quota_type_i];
|
||||
QuotaValue max = interval.max[quota_type_i];
|
||||
if (!max)
|
||||
continue;
|
||||
if (used > max)
|
||||
@ -127,7 +132,7 @@ struct EnabledQuota::Impl
|
||||
bool counters_were_reset = false;
|
||||
std::chrono::system_clock::time_point end_of_interval = getEndOfInterval(interval, current_time, counters_were_reset);
|
||||
if (!counters_were_reset)
|
||||
throwQuotaExceed(user_name, intervals.quota_name, resource_type, used, max, interval.duration, end_of_interval);
|
||||
throwQuotaExceed(user_name, intervals.quota_name, quota_type, used, max, interval.duration, end_of_interval);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -137,18 +142,19 @@ struct EnabledQuota::Impl
|
||||
const Intervals & intervals,
|
||||
std::chrono::system_clock::time_point current_time)
|
||||
{
|
||||
for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE))
|
||||
checkExceeded(user_name, intervals, resource_type, current_time);
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
checkExceeded(user_name, intervals, quota_type, current_time);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
EnabledQuota::Interval::Interval()
|
||||
{
|
||||
for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
used[resource_type].store(0);
|
||||
max[resource_type] = 0;
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
used[quota_type_i].store(0);
|
||||
max[quota_type_i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -161,10 +167,11 @@ EnabledQuota::Interval & EnabledQuota::Interval::operator =(const Interval & src
|
||||
randomize_interval = src.randomize_interval;
|
||||
duration = src.duration;
|
||||
end_of_interval.store(src.end_of_interval.load());
|
||||
for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
max[resource_type] = src.max[resource_type];
|
||||
used[resource_type].store(src.used[resource_type].load());
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
max[quota_type_i] = src.max[quota_type_i];
|
||||
used[quota_type_i].store(src.used[quota_type_i].load());
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
@ -187,11 +194,12 @@ std::optional<QuotaUsage> EnabledQuota::Intervals::getUsage(std::chrono::system_
|
||||
out.randomize_interval = in.randomize_interval;
|
||||
bool counters_were_reset = false;
|
||||
out.end_of_interval = Impl::getEndOfInterval(in, current_time, counters_were_reset);
|
||||
for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
if (in.max[resource_type])
|
||||
out.max[resource_type] = in.max[resource_type];
|
||||
out.used[resource_type] = in.used[resource_type];
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
if (in.max[quota_type_i])
|
||||
out.max[quota_type_i] = in.max[quota_type_i];
|
||||
out.used[quota_type_i] = in.used[quota_type_i];
|
||||
}
|
||||
}
|
||||
return usage;
|
||||
@ -205,45 +213,45 @@ EnabledQuota::EnabledQuota(const Params & params_) : params(params_)
|
||||
EnabledQuota::~EnabledQuota() = default;
|
||||
|
||||
|
||||
void EnabledQuota::used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded) const
|
||||
void EnabledQuota::used(QuotaType quota_type, QuotaValue value, bool check_exceeded) const
|
||||
{
|
||||
used({resource_type, amount}, check_exceeded);
|
||||
used({quota_type, value}, check_exceeded);
|
||||
}
|
||||
|
||||
|
||||
void EnabledQuota::used(const std::pair<ResourceType, ResourceAmount> & resource, bool check_exceeded) const
|
||||
void EnabledQuota::used(const std::pair<QuotaType, QuotaValue> & usage1, bool check_exceeded) const
|
||||
{
|
||||
auto loaded = intervals.load();
|
||||
auto current_time = std::chrono::system_clock::now();
|
||||
Impl::used(getUserName(), *loaded, resource.first, resource.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded);
|
||||
}
|
||||
|
||||
|
||||
void EnabledQuota::used(const std::pair<ResourceType, ResourceAmount> & resource1, const std::pair<ResourceType, ResourceAmount> & resource2, bool check_exceeded) const
|
||||
void EnabledQuota::used(const std::pair<QuotaType, QuotaValue> & usage1, const std::pair<QuotaType, QuotaValue> & usage2, bool check_exceeded) const
|
||||
{
|
||||
auto loaded = intervals.load();
|
||||
auto current_time = std::chrono::system_clock::now();
|
||||
Impl::used(getUserName(), *loaded, resource1.first, resource1.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, resource2.first, resource2.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage2.first, usage2.second, current_time, check_exceeded);
|
||||
}
|
||||
|
||||
|
||||
void EnabledQuota::used(const std::pair<ResourceType, ResourceAmount> & resource1, const std::pair<ResourceType, ResourceAmount> & resource2, const std::pair<ResourceType, ResourceAmount> & resource3, bool check_exceeded) const
|
||||
void EnabledQuota::used(const std::pair<QuotaType, QuotaValue> & usage1, const std::pair<QuotaType, QuotaValue> & usage2, const std::pair<QuotaType, QuotaValue> & usage3, bool check_exceeded) const
|
||||
{
|
||||
auto loaded = intervals.load();
|
||||
auto current_time = std::chrono::system_clock::now();
|
||||
Impl::used(getUserName(), *loaded, resource1.first, resource1.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, resource2.first, resource2.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, resource3.first, resource3.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage1.first, usage1.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage2.first, usage2.second, current_time, check_exceeded);
|
||||
Impl::used(getUserName(), *loaded, usage3.first, usage3.second, current_time, check_exceeded);
|
||||
}
|
||||
|
||||
|
||||
void EnabledQuota::used(const std::vector<std::pair<ResourceType, ResourceAmount>> & resources, bool check_exceeded) const
|
||||
void EnabledQuota::used(const std::vector<std::pair<QuotaType, QuotaValue>> & usages, bool check_exceeded) const
|
||||
{
|
||||
auto loaded = intervals.load();
|
||||
auto current_time = std::chrono::system_clock::now();
|
||||
for (const auto & resource : resources)
|
||||
Impl::used(getUserName(), *loaded, resource.first, resource.second, current_time, check_exceeded);
|
||||
for (const auto & usage : usages)
|
||||
Impl::used(getUserName(), *loaded, usage.first, usage.second, current_time, check_exceeded);
|
||||
}
|
||||
|
||||
|
||||
@ -254,10 +262,10 @@ void EnabledQuota::checkExceeded() const
|
||||
}
|
||||
|
||||
|
||||
void EnabledQuota::checkExceeded(ResourceType resource_type) const
|
||||
void EnabledQuota::checkExceeded(QuotaType quota_type) const
|
||||
{
|
||||
auto loaded = intervals.load();
|
||||
Impl::checkExceeded(getUserName(), *loaded, resource_type, std::chrono::system_clock::now());
|
||||
Impl::checkExceeded(getUserName(), *loaded, quota_type, std::chrono::system_clock::now());
|
||||
}
|
||||
|
||||
|
||||
|
@ -1,13 +1,15 @@
#pragma once

#include <Access/Quota.h>
#include <Access/Common/QuotaDefs.h>
#include <Core/UUID.h>
#include <Poco/Net/IPAddress.h>
#include <boost/container/flat_set.hpp>
#include <boost/noncopyable.hpp>
#include <boost/smart_ptr/atomic_shared_ptr.hpp>
#include <atomic>
#include <chrono>
#include <memory>
#include <optional>

namespace DB
@ -37,21 +39,18 @@ public:
friend bool operator >=(const Params & lhs, const Params & rhs) { return !(lhs < rhs); }
};

using ResourceType = Quota::ResourceType;
using ResourceAmount = Quota::ResourceAmount;

~EnabledQuota();

/// Tracks resource consumption. If the quota exceeded and `check_exceeded == true`, throws an exception.
void used(ResourceType resource_type, ResourceAmount amount, bool check_exceeded = true) const;
void used(const std::pair<ResourceType, ResourceAmount> & resource, bool check_exceeded = true) const;
void used(const std::pair<ResourceType, ResourceAmount> & resource1, const std::pair<ResourceType, ResourceAmount> & resource2, bool check_exceeded = true) const;
void used(const std::pair<ResourceType, ResourceAmount> & resource1, const std::pair<ResourceType, ResourceAmount> & resource2, const std::pair<ResourceType, ResourceAmount> & resource3, bool check_exceeded = true) const;
void used(const std::vector<std::pair<ResourceType, ResourceAmount>> & resources, bool check_exceeded = true) const;
void used(QuotaType quota_type, QuotaValue value, bool check_exceeded = true) const;
void used(const std::pair<QuotaType, QuotaValue> & usage1, bool check_exceeded = true) const;
void used(const std::pair<QuotaType, QuotaValue> & usage1, const std::pair<QuotaType, QuotaValue> & usage2, bool check_exceeded = true) const;
void used(const std::pair<QuotaType, QuotaValue> & usage1, const std::pair<QuotaType, QuotaValue> & usage2, const std::pair<QuotaType, QuotaValue> & usage3, bool check_exceeded = true) const;
void used(const std::vector<std::pair<QuotaType, QuotaValue>> & usages, bool check_exceeded = true) const;

/// Checks if the quota exceeded. If so, throws an exception.
void checkExceeded() const;
void checkExceeded(ResourceType resource_type) const;
void checkExceeded(QuotaType quota_type) const;

/// Returns the information about quota consumption.
std::optional<QuotaUsage> getUsage() const;
@ -66,12 +65,10 @@ private:

const String & getUserName() const { return params.user_name; }

static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE;

struct Interval
{
mutable std::atomic<ResourceAmount> used[MAX_RESOURCE_TYPE];
ResourceAmount max[MAX_RESOURCE_TYPE];
mutable std::atomic<QuotaValue> used[static_cast<size_t>(QuotaType::MAX)];
QuotaValue max[static_cast<size_t>(QuotaType::MAX)];
std::chrono::seconds duration = std::chrono::seconds::zero();
bool randomize_interval = false;
mutable std::atomic<std::chrono::system_clock::duration> end_of_interval;
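The Interval counters are now sized by the scoped enum, which requires an explicit cast on every index. A self-contained illustration of the pattern; the enumerators and value type here are placeholders, not the real QuotaDefs.h contents:

#include <atomic>
#include <cstddef>
#include <cstdint>

enum class QuotaType : std::size_t { QUERIES, ERRORS, MAX };   /// placeholder enum for the sketch
using QuotaValue = std::uint64_t;

struct Interval
{
    std::atomic<QuotaValue> used[static_cast<std::size_t>(QuotaType::MAX)] {};

    void add(QuotaType type, QuotaValue amount)
    {
        /// Scoped enums do not convert implicitly, hence the cast when indexing.
        used[static_cast<std::size_t>(type)] += amount;
    }
};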
@ -6,9 +6,9 @@
namespace DB
{
size_t EnabledRowPolicies::Hash::operator()(const MixedConditionKey & key) const
size_t EnabledRowPolicies::Hash::operator()(const MixedFiltersKey & key) const
{
return std::hash<std::string_view>{}(key.database) - std::hash<std::string_view>{}(key.table_name) + static_cast<size_t>(key.condition_type);
return std::hash<std::string_view>{}(key.database) - std::hash<std::string_view>{}(key.table_name) + static_cast<size_t>(key.filter_type);
}

@ -23,36 +23,36 @@ EnabledRowPolicies::EnabledRowPolicies(const Params & params_) : params(params_)
EnabledRowPolicies::~EnabledRowPolicies() = default;

ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType condition_type) const
ASTPtr EnabledRowPolicies::getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const
{
/// We don't lock `mutex` here.
auto loaded = map_of_mixed_conditions.load();
auto it = loaded->find({database, table_name, condition_type});
auto loaded = mixed_filters.load();
auto it = loaded->find({database, table_name, filter_type});
if (it == loaded->end())
return {};

auto condition = it->second.ast;
auto filter = it->second.ast;

bool value;
if (tryGetLiteralBool(condition.get(), value) && value)
if (tryGetLiteralBool(filter.get(), value) && value)
return nullptr; /// The condition is always true, no need to check it.

return condition;
return filter;
}

ASTPtr EnabledRowPolicies::getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const
ASTPtr EnabledRowPolicies::getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const
{
ASTPtr condition = getCondition(database, table_name, type);
if (condition && extra_condition)
condition = makeASTForLogicalAnd({condition, extra_condition});
else if (!condition)
condition = extra_condition;
ASTPtr filter = getFilter(database, table_name, filter_type);
if (filter && combine_with_expr)
filter = makeASTForLogicalAnd({filter, combine_with_expr});
else if (!filter)
filter = combine_with_expr;

bool value;
if (tryGetLiteralBool(condition.get(), value) && value)
if (tryGetLiteralBool(filter.get(), value) && value)
return nullptr; /// The condition is always true, no need to check it.

return condition;
return filter;
}

}
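A sketch of how a caller might combine a row-policy filter with an extra WHERE expression through the renamed getFilter(); the accessor, the filter-type enumerator, and the helper at the end are assumptions for illustration only:

auto row_policies = context->getRowPolicies();                     /// assumed accessor
ASTPtr query_where = nullptr;                                      /// parsed WHERE clause, if any
ASTPtr filter = row_policies->getFilter("db", "table", RowPolicyFilterType::SELECT_FILTER, query_where);
if (filter)
    applyFilterToPipeline(filter);                                 /// hypothetical helper; nullptr means the combined condition is always true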
@ -1,8 +1,9 @@
#pragma once

#include <Access/RowPolicy.h>
#include <Access/Common/RowPolicyDefs.h>
#include <base/types.h>
#include <Core/UUID.h>
#include <boost/container/flat_set.hpp>
#include <boost/smart_ptr/atomic_shared_ptr.hpp>
#include <unordered_map>
#include <memory>
@ -35,43 +36,42 @@ public:
EnabledRowPolicies();
~EnabledRowPolicies();

using ConditionType = RowPolicy::ConditionType;

/// Returns prepared filter for a specific table and operations.
/// The function can return nullptr, that means there is no filters applied.
/// The returned filter can be a combination of the filters defined by multiple row policies.
ASTPtr getCondition(const String & database, const String & table_name, ConditionType type) const;
ASTPtr getCondition(const String & database, const String & table_name, ConditionType type, const ASTPtr & extra_condition) const;
ASTPtr getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const;
ASTPtr getFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type, const ASTPtr & combine_with_expr) const;

private:
friend class RowPolicyCache;
EnabledRowPolicies(const Params & params_);

struct MixedConditionKey
struct MixedFiltersKey
{
std::string_view database;
std::string_view table_name;
ConditionType condition_type;
RowPolicyFilterType filter_type;

auto toTuple() const { return std::tie(database, table_name, condition_type); }
friend bool operator==(const MixedConditionKey & left, const MixedConditionKey & right) { return left.toTuple() == right.toTuple(); }
friend bool operator!=(const MixedConditionKey & left, const MixedConditionKey & right) { return left.toTuple() != right.toTuple(); }
auto toTuple() const { return std::tie(database, table_name, filter_type); }
friend bool operator==(const MixedFiltersKey & left, const MixedFiltersKey & right) { return left.toTuple() == right.toTuple(); }
friend bool operator!=(const MixedFiltersKey & left, const MixedFiltersKey & right) { return left.toTuple() != right.toTuple(); }
};

struct Hash
{
size_t operator()(const MixedConditionKey & key) const;
};

struct MixedCondition
struct MixedFiltersResult
{
ASTPtr ast;
std::shared_ptr<const std::pair<String, String>> database_and_table_name;
};
using MapOfMixedConditions = std::unordered_map<MixedConditionKey, MixedCondition, Hash>;

struct Hash
{
size_t operator()(const MixedFiltersKey & key) const;
};

using MixedFiltersMap = std::unordered_map<MixedFiltersKey, MixedFiltersResult, Hash>;

const Params params;
mutable boost::atomic_shared_ptr<const MapOfMixedConditions> map_of_mixed_conditions;
mutable boost::atomic_shared_ptr<const MixedFiltersMap> mixed_filters;
};

}
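MixedFiltersMap keys an unordered_map on (database, table, filter type) with a hand-written Hash. A self-contained sketch of the same pattern with simplified stand-in types (not the exact ClickHouse definitions):

#include <cstddef>
#include <functional>
#include <string_view>
#include <tuple>
#include <unordered_map>

enum class FilterType { SELECT_FILTER, MAX };   /// stand-in for RowPolicyFilterType

struct Key
{
    std::string_view database;
    std::string_view table_name;
    FilterType filter_type;

    auto toTuple() const { return std::tie(database, table_name, filter_type); }
    friend bool operator==(const Key & left, const Key & right) { return left.toTuple() == right.toTuple(); }
};

struct KeyHash
{
    std::size_t operator()(const Key & key) const
    {
        /// Combine the three fields into one hash; the production code uses a similar ad-hoc mix.
        return std::hash<std::string_view>{}(key.database)
            ^ (std::hash<std::string_view>{}(key.table_name) << 1)
            ^ static_cast<std::size_t>(key.filter_type);
    }
};

/// int stands in for MixedFiltersResult.
using FiltersMap = std::unordered_map<Key, int, KeyHash>;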
@ -1,24 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
#include <base/types.h>
|
||||
#include <Access/Common/AccessEntityType.h>
|
||||
#include <Common/typeid_cast.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <boost/algorithm/string.hpp>
|
||||
#include <base/types.h>
|
||||
#include <memory>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNKNOWN_USER;
|
||||
extern const int UNKNOWN_ROLE;
|
||||
extern const int UNKNOWN_ROW_POLICY;
|
||||
extern const int UNKNOWN_QUOTA;
|
||||
extern const int THERE_IS_NO_PROFILE;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/// Access entity is a set of data which have a name and a type. Access entity control something related to the access control.
|
||||
/// Entities can be stored to a file or another storage, see IAccessStorage.
|
||||
@ -29,41 +18,14 @@ struct IAccessEntity
|
||||
virtual ~IAccessEntity() = default;
|
||||
virtual std::shared_ptr<IAccessEntity> clone() const = 0;
|
||||
|
||||
enum class Type
|
||||
{
|
||||
USER,
|
||||
ROLE,
|
||||
SETTINGS_PROFILE,
|
||||
ROW_POLICY,
|
||||
QUOTA,
|
||||
virtual AccessEntityType getType() const = 0;
|
||||
|
||||
MAX,
|
||||
};
|
||||
|
||||
virtual Type getType() const = 0;
|
||||
|
||||
struct TypeInfo
|
||||
{
|
||||
const char * const raw_name;
|
||||
const char * const plural_raw_name;
|
||||
const String name; /// Uppercased with spaces instead of underscores, e.g. "SETTINGS PROFILE".
|
||||
const String alias; /// Alias of the keyword or empty string, e.g. "PROFILE".
|
||||
const String plural_name; /// Uppercased with spaces plural name, e.g. "SETTINGS PROFILES".
|
||||
const String plural_alias; /// Uppercased with spaces plural name alias, e.g. "PROFILES".
|
||||
const String name_for_output_with_entity_name; /// Lowercased with spaces instead of underscores, e.g. "settings profile".
|
||||
const char unique_char; /// Unique character for this type. E.g. 'P' for SETTINGS_PROFILE.
|
||||
const int not_found_error_code;
|
||||
|
||||
static const TypeInfo & get(Type type_);
|
||||
String outputWithEntityName(const String & entity_name) const;
|
||||
};
|
||||
|
||||
const TypeInfo & getTypeInfo() const { return TypeInfo::get(getType()); }
|
||||
String outputTypeAndName() const { return getTypeInfo().outputWithEntityName(getName()); }
|
||||
const AccessEntityTypeInfo & getTypeInfo() const { return AccessEntityTypeInfo::get(getType()); }
|
||||
String formatTypeWithName() const { return getTypeInfo().formatEntityNameWithType(getName()); }
|
||||
|
||||
template <typename EntityClassT>
|
||||
bool isTypeOf() const { return isTypeOf(EntityClassT::TYPE); }
|
||||
bool isTypeOf(Type type) const { return type == getType(); }
|
||||
bool isTypeOf(AccessEntityType type) const { return type == getType(); }
|
||||
|
||||
virtual void setName(const String & name_) { name = name_; }
|
||||
const String & getName() const { return name; }
|
||||
@ -98,70 +60,4 @@ protected:
|
||||
|
||||
using AccessEntityPtr = std::shared_ptr<const IAccessEntity>;
|
||||
|
||||
|
||||
inline const IAccessEntity::TypeInfo & IAccessEntity::TypeInfo::get(Type type_)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_, const char * plural_raw_name_, char unique_char_, int not_found_error_code_)
|
||||
{
|
||||
String init_names[2] = {raw_name_, plural_raw_name_};
|
||||
String init_aliases[2];
|
||||
for (size_t i = 0; i != std::size(init_names); ++i)
|
||||
{
|
||||
String & init_name = init_names[i];
|
||||
String & init_alias = init_aliases[i];
|
||||
boost::to_upper(init_name);
|
||||
boost::replace_all(init_name, "_", " ");
|
||||
if (auto underscore_pos = init_name.find_first_of(" "); underscore_pos != String::npos)
|
||||
init_alias = init_name.substr(underscore_pos + 1);
|
||||
}
|
||||
String init_name_for_output_with_entity_name = init_names[0];
|
||||
boost::to_lower(init_name_for_output_with_entity_name);
|
||||
return TypeInfo{raw_name_, plural_raw_name_, std::move(init_names[0]), std::move(init_aliases[0]), std::move(init_names[1]), std::move(init_aliases[1]), std::move(init_name_for_output_with_entity_name), unique_char_, not_found_error_code_};
|
||||
};
|
||||
|
||||
switch (type_)
|
||||
{
|
||||
case Type::USER:
|
||||
{
|
||||
static const auto info = make_info("USER", "USERS", 'U', ErrorCodes::UNKNOWN_USER);
|
||||
return info;
|
||||
}
|
||||
case Type::ROLE:
|
||||
{
|
||||
static const auto info = make_info("ROLE", "ROLES", 'R', ErrorCodes::UNKNOWN_ROLE);
|
||||
return info;
|
||||
}
|
||||
case Type::SETTINGS_PROFILE:
|
||||
{
|
||||
static const auto info = make_info("SETTINGS_PROFILE", "SETTINGS_PROFILES", 'S', ErrorCodes::THERE_IS_NO_PROFILE);
|
||||
return info;
|
||||
}
|
||||
case Type::ROW_POLICY:
|
||||
{
|
||||
static const auto info = make_info("ROW_POLICY", "ROW_POLICIES", 'P', ErrorCodes::UNKNOWN_ROW_POLICY);
|
||||
return info;
|
||||
}
|
||||
case Type::QUOTA:
|
||||
{
|
||||
static const auto info = make_info("QUOTA", "QUOTAS", 'Q', ErrorCodes::UNKNOWN_QUOTA);
|
||||
return info;
|
||||
}
|
||||
case Type::MAX: break;
|
||||
}
|
||||
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
inline String IAccessEntity::TypeInfo::outputWithEntityName(const String & entity_name) const
|
||||
{
|
||||
String msg = name_for_output_with_entity_name;
|
||||
msg += " ";
|
||||
msg += backQuote(entity_name);
|
||||
return msg;
|
||||
}
|
||||
|
||||
inline String toString(IAccessEntity::Type type)
|
||||
{
|
||||
return IAccessEntity::TypeInfo::get(type).name;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -8,6 +8,8 @@
|
||||
#include <Poco/UUIDGenerator.h>
|
||||
#include <Poco/Logger.h>
|
||||
#include <base/FnTraits.h>
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -26,20 +28,16 @@ namespace ErrorCodes
|
||||
|
||||
namespace
|
||||
{
|
||||
using EntityType = IAccessStorage::EntityType;
|
||||
using EntityTypeInfo = IAccessStorage::EntityTypeInfo;
|
||||
|
||||
|
||||
String outputID(const UUID & id)
|
||||
{
|
||||
return "ID(" + toString(id) + ")";
|
||||
}
|
||||
|
||||
String outputTypeAndNameOrID(const IAccessStorage & storage, const UUID & id)
|
||||
String formatTypeWithNameOrID(const IAccessStorage & storage, const UUID & id)
|
||||
{
|
||||
auto entity = storage.tryRead(id);
|
||||
if (entity)
|
||||
return entity->outputTypeAndName();
|
||||
return entity->formatTypeWithName();
|
||||
return outputID(id);
|
||||
}
|
||||
|
||||
@ -132,19 +130,19 @@ namespace
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> IAccessStorage::findAll(EntityType type) const
|
||||
std::vector<UUID> IAccessStorage::findAll(AccessEntityType type) const
|
||||
{
|
||||
return findAllImpl(type);
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> IAccessStorage::find(EntityType type, const String & name) const
|
||||
std::optional<UUID> IAccessStorage::find(AccessEntityType type, const String & name) const
|
||||
{
|
||||
return findImpl(type, name);
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> IAccessStorage::find(EntityType type, const Strings & names) const
|
||||
std::vector<UUID> IAccessStorage::find(AccessEntityType type, const Strings & names) const
|
||||
{
|
||||
std::vector<UUID> ids;
|
||||
ids.reserve(names.size());
|
||||
@ -158,7 +156,7 @@ std::vector<UUID> IAccessStorage::find(EntityType type, const Strings & names) c
|
||||
}
|
||||
|
||||
|
||||
UUID IAccessStorage::getID(EntityType type, const String & name) const
|
||||
UUID IAccessStorage::getID(AccessEntityType type, const String & name) const
|
||||
{
|
||||
auto id = findImpl(type, name);
|
||||
if (id)
|
||||
@ -167,7 +165,7 @@ UUID IAccessStorage::getID(EntityType type, const String & name) const
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> IAccessStorage::getIDs(EntityType type, const Strings & names) const
|
||||
std::vector<UUID> IAccessStorage::getIDs(AccessEntityType type, const Strings & names) const
|
||||
{
|
||||
std::vector<UUID> ids;
|
||||
ids.reserve(names.size());
|
||||
@ -253,7 +251,7 @@ std::vector<UUID> IAccessStorage::insert(const std::vector<AccessEntityPtr> & mu
|
||||
|
||||
if (tracker.errors())
|
||||
{
|
||||
auto get_name_function = [&](size_t i) { return multiple_entities[i]->outputTypeAndName(); };
|
||||
auto get_name_function = [&](size_t i) { return multiple_entities[i]->formatTypeWithName(); };
|
||||
tracker.showErrors("Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", get_name_function);
|
||||
}
|
||||
|
||||
@ -306,7 +304,7 @@ std::vector<UUID> IAccessStorage::insertOrReplace(const std::vector<AccessEntity
|
||||
|
||||
if (tracker.errors())
|
||||
{
|
||||
auto get_name_function = [&](size_t i) { return multiple_entities[i]->outputTypeAndName(); };
|
||||
auto get_name_function = [&](size_t i) { return multiple_entities[i]->formatTypeWithName(); };
|
||||
tracker.showErrors("Couldn't insert {failed_names}. Successfully inserted: {succeeded_names}", get_name_function);
|
||||
}
|
||||
|
||||
@ -332,7 +330,7 @@ void IAccessStorage::remove(const std::vector<UUID> & ids)
|
||||
|
||||
if (tracker.errors())
|
||||
{
|
||||
auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); };
|
||||
auto get_name_function = [&](size_t i) { return formatTypeWithNameOrID(*this, ids[i]); };
|
||||
tracker.showErrors("Couldn't remove {failed_names}. Successfully removed: {succeeded_names}", get_name_function);
|
||||
}
|
||||
}
|
||||
@ -376,7 +374,7 @@ void IAccessStorage::update(const std::vector<UUID> & ids, const UpdateFunc & up
|
||||
|
||||
if (tracker.errors())
|
||||
{
|
||||
auto get_name_function = [&](size_t i) { return outputTypeAndNameOrID(*this, ids[i]); };
|
||||
auto get_name_function = [&](size_t i) { return formatTypeWithNameOrID(*this, ids[i]); };
|
||||
tracker.showErrors("Couldn't update {failed_names}. Successfully updated: {succeeded_names}", get_name_function);
|
||||
}
|
||||
}
|
||||
@ -402,7 +400,7 @@ std::vector<UUID> IAccessStorage::tryUpdate(const std::vector<UUID> & ids, const
|
||||
}
|
||||
|
||||
|
||||
scope_guard IAccessStorage::subscribeForChanges(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard IAccessStorage::subscribeForChanges(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
return subscribeForChangesImpl(type, handler);
|
||||
}
|
||||
@ -423,7 +421,7 @@ scope_guard IAccessStorage::subscribeForChanges(const std::vector<UUID> & ids, c
|
||||
}
|
||||
|
||||
|
||||
bool IAccessStorage::hasSubscription(EntityType type) const
|
||||
bool IAccessStorage::hasSubscription(AccessEntityType type) const
|
||||
{
|
||||
return hasSubscriptionImpl(type);
|
||||
}
|
||||
@ -481,7 +479,7 @@ UUID IAccessStorage::loginImpl(
|
||||
return *id;
|
||||
}
|
||||
}
|
||||
throwNotFound(EntityType::USER, credentials.getUserName());
|
||||
throwNotFound(AccessEntityType::USER, credentials.getUserName());
|
||||
}
|
||||
|
||||
|
||||
@ -542,68 +540,68 @@ void IAccessStorage::throwNotFound(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwNotFound(EntityType type, const String & name) const
|
||||
void IAccessStorage::throwNotFound(AccessEntityType type, const String & name) const
|
||||
{
|
||||
int error_code = EntityTypeInfo::get(type).not_found_error_code;
|
||||
throw Exception("There is no " + outputEntityTypeAndName(type, name) + " in " + getStorageName(), error_code);
|
||||
int error_code = AccessEntityTypeInfo::get(type).not_found_error_code;
|
||||
throw Exception("There is no " + formatEntityTypeWithName(type, name) + " in " + getStorageName(), error_code);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwBadCast(const UUID & id, EntityType type, const String & name, EntityType required_type)
|
||||
void IAccessStorage::throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type)
|
||||
{
|
||||
throw Exception(
|
||||
outputID(id) + ": " + outputEntityTypeAndName(type, name) + " expected to be of type " + toString(required_type),
|
||||
outputID(id) + ": " + formatEntityTypeWithName(type, name) + " expected to be of type " + toString(required_type),
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, EntityType type, const String & name, EntityType existing_type, const String & existing_name) const
|
||||
void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const
|
||||
{
|
||||
throw Exception(
|
||||
outputEntityTypeAndName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by "
|
||||
+ outputEntityTypeAndName(existing_type, existing_name) + " in " + getStorageName(),
|
||||
formatEntityTypeWithName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by "
|
||||
+ formatEntityTypeWithName(existing_type, existing_name) + " in " + getStorageName(),
|
||||
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwNameCollisionCannotInsert(EntityType type, const String & name) const
|
||||
void IAccessStorage::throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const
|
||||
{
|
||||
throw Exception(
|
||||
outputEntityTypeAndName(type, name) + ": cannot insert because " + outputEntityTypeAndName(type, name) + " already exists in "
|
||||
formatEntityTypeWithName(type, name) + ": cannot insert because " + formatEntityTypeWithName(type, name) + " already exists in "
|
||||
+ getStorageName(),
|
||||
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwNameCollisionCannotRename(EntityType type, const String & old_name, const String & new_name) const
|
||||
void IAccessStorage::throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const
|
||||
{
|
||||
throw Exception(
|
||||
outputEntityTypeAndName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because "
|
||||
+ outputEntityTypeAndName(type, new_name) + " already exists in " + getStorageName(),
|
||||
formatEntityTypeWithName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because "
|
||||
+ formatEntityTypeWithName(type, new_name) + " already exists in " + getStorageName(),
|
||||
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwReadonlyCannotInsert(EntityType type, const String & name) const
|
||||
void IAccessStorage::throwReadonlyCannotInsert(AccessEntityType type, const String & name) const
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot insert " + outputEntityTypeAndName(type, name) + " to " + getStorageName() + " because this storage is readonly",
|
||||
"Cannot insert " + formatEntityTypeWithName(type, name) + " to " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwReadonlyCannotUpdate(EntityType type, const String & name) const
|
||||
void IAccessStorage::throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot update " + outputEntityTypeAndName(type, name) + " in " + getStorageName() + " because this storage is readonly",
|
||||
"Cannot update " + formatEntityTypeWithName(type, name) + " in " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
|
||||
|
||||
void IAccessStorage::throwReadonlyCannotRemove(EntityType type, const String & name) const
|
||||
void IAccessStorage::throwReadonlyCannotRemove(AccessEntityType type, const String & name) const
|
||||
{
|
||||
throw Exception(
|
||||
"Cannot remove " + outputEntityTypeAndName(type, name) + " from " + getStorageName() + " because this storage is readonly",
|
||||
"Cannot remove " + formatEntityTypeWithName(type, name) + " from " + getStorageName() + " because this storage is readonly",
|
||||
ErrorCodes::ACCESS_STORAGE_READONLY);
|
||||
}
|
||||
|
||||
|
@ -34,33 +34,30 @@ public:
|
||||
/// Returns a JSON with the parameters of the storage. It's up to the storage type to fill the JSON.
|
||||
virtual String getStorageParamsJSON() const { return "{}"; }
|
||||
|
||||
using EntityType = IAccessEntity::Type;
|
||||
using EntityTypeInfo = IAccessEntity::TypeInfo;
|
||||
|
||||
/// Returns the identifiers of all the entities of a specified type contained in the storage.
|
||||
std::vector<UUID> findAll(EntityType type) const;
|
||||
std::vector<UUID> findAll(AccessEntityType type) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
std::vector<UUID> findAll() const { return findAll(EntityClassT::TYPE); }
|
||||
|
||||
/// Searches for an entity with specified type and name. Returns std::nullopt if not found.
|
||||
std::optional<UUID> find(EntityType type, const String & name) const;
|
||||
std::optional<UUID> find(AccessEntityType type, const String & name) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
std::optional<UUID> find(const String & name) const { return find(EntityClassT::TYPE, name); }
|
||||
|
||||
std::vector<UUID> find(EntityType type, const Strings & names) const;
|
||||
std::vector<UUID> find(AccessEntityType type, const Strings & names) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
std::vector<UUID> find(const Strings & names) const { return find(EntityClassT::TYPE, names); }
|
||||
|
||||
/// Searches for an entity with specified name and type. Throws an exception if not found.
|
||||
UUID getID(EntityType type, const String & name) const;
|
||||
UUID getID(AccessEntityType type, const String & name) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
UUID getID(const String & name) const { return getID(EntityClassT::TYPE, name); }
|
||||
|
||||
std::vector<UUID> getIDs(EntityType type, const Strings & names) const;
|
||||
std::vector<UUID> getIDs(AccessEntityType type, const Strings & names) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
std::vector<UUID> getIDs(const Strings & names) const { return getIDs(EntityClassT::TYPE, names); }
|
||||
@ -132,7 +129,7 @@ public:
|
||||
|
||||
/// Subscribes for all changes.
|
||||
/// Can return nullptr if cannot subscribe (identifier not found) or if it doesn't make sense (the storage is read-only).
|
||||
scope_guard subscribeForChanges(EntityType type, const OnChangedHandler & handler) const;
|
||||
scope_guard subscribeForChanges(AccessEntityType type, const OnChangedHandler & handler) const;
|
||||
|
||||
template <typename EntityClassT>
|
||||
scope_guard subscribeForChanges(OnChangedHandler handler) const { return subscribeForChanges(EntityClassT::TYPE, handler); }
|
||||
@ -142,7 +139,7 @@ public:
|
||||
scope_guard subscribeForChanges(const UUID & id, const OnChangedHandler & handler) const;
|
||||
scope_guard subscribeForChanges(const std::vector<UUID> & ids, const OnChangedHandler & handler) const;
|
||||
|
||||
bool hasSubscription(EntityType type) const;
|
||||
bool hasSubscription(AccessEntityType type) const;
|
||||
bool hasSubscription(const UUID & id) const;
|
||||
|
||||
/// Finds a user, check the provided credentials and returns the ID of the user if they are valid.
|
||||
@ -154,8 +151,8 @@ public:
|
||||
UUID getIDOfLoggedUser(const String & user_name) const;
|
||||
|
||||
protected:
|
||||
virtual std::optional<UUID> findImpl(EntityType type, const String & name) const = 0;
|
||||
virtual std::vector<UUID> findAllImpl(EntityType type) const = 0;
|
||||
virtual std::optional<UUID> findImpl(AccessEntityType type, const String & name) const = 0;
|
||||
virtual std::vector<UUID> findAllImpl(AccessEntityType type) const = 0;
|
||||
virtual bool existsImpl(const UUID & id) const = 0;
|
||||
virtual AccessEntityPtr readImpl(const UUID & id) const = 0;
|
||||
virtual String readNameImpl(const UUID & id) const = 0;
|
||||
@ -164,9 +161,9 @@ protected:
|
||||
virtual void removeImpl(const UUID & id) = 0;
|
||||
virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) = 0;
|
||||
virtual scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const = 0;
|
||||
virtual scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const = 0;
|
||||
virtual scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const = 0;
|
||||
virtual bool hasSubscriptionImpl(const UUID & id) const = 0;
|
||||
virtual bool hasSubscriptionImpl(EntityType type) const = 0;
|
||||
virtual bool hasSubscriptionImpl(AccessEntityType type) const = 0;
|
||||
virtual UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const;
|
||||
virtual bool areCredentialsValidImpl(const User & user, const Credentials & credentials, const ExternalAuthenticators & external_authenticators) const;
|
||||
virtual bool isAddressAllowedImpl(const User & user, const Poco::Net::IPAddress & address) const;
|
||||
@ -174,17 +171,17 @@ protected:
|
||||
|
||||
static UUID generateRandomID();
|
||||
Poco::Logger * getLogger() const;
|
||||
static String outputEntityTypeAndName(EntityType type, const String & name) { return EntityTypeInfo::get(type).outputWithEntityName(name); }
|
||||
static String formatEntityTypeWithName(AccessEntityType type, const String & name) { return AccessEntityTypeInfo::get(type).formatEntityNameWithType(name); }
|
||||
[[noreturn]] void throwNotFound(const UUID & id) const;
|
||||
[[noreturn]] void throwNotFound(EntityType type, const String & name) const;
|
||||
[[noreturn]] static void throwBadCast(const UUID & id, EntityType type, const String & name, EntityType required_type);
|
||||
[[noreturn]] void throwNotFound(AccessEntityType type, const String & name) const;
|
||||
[[noreturn]] static void throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type);
|
||||
[[noreturn]] void throwIDCollisionCannotInsert(
|
||||
const UUID & id, EntityType type, const String & name, EntityType existing_type, const String & existing_name) const;
|
||||
[[noreturn]] void throwNameCollisionCannotInsert(EntityType type, const String & name) const;
|
||||
[[noreturn]] void throwNameCollisionCannotRename(EntityType type, const String & old_name, const String & new_name) const;
|
||||
[[noreturn]] void throwReadonlyCannotInsert(EntityType type, const String & name) const;
|
||||
[[noreturn]] void throwReadonlyCannotUpdate(EntityType type, const String & name) const;
|
||||
[[noreturn]] void throwReadonlyCannotRemove(EntityType type, const String & name) const;
|
||||
const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const;
|
||||
[[noreturn]] void throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const;
|
||||
[[noreturn]] void throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const;
|
||||
[[noreturn]] void throwReadonlyCannotInsert(AccessEntityType type, const String & name) const;
|
||||
[[noreturn]] void throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const;
|
||||
[[noreturn]] void throwReadonlyCannotRemove(AccessEntityType type, const String & name) const;
|
||||
[[noreturn]] static void throwAddressNotAllowed(const Poco::Net::IPAddress & address);
|
||||
[[noreturn]] static void throwInvalidCredentials();
|
||||
[[noreturn]] static void throwCannotAuthenticate(const String & user_name);
|
||||
|
@ -412,14 +412,14 @@ String LDAPAccessStorage::getStorageParamsJSON() const
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> LDAPAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> LDAPAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
std::scoped_lock lock(mutex);
|
||||
return memory_storage.find(type, name);
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> LDAPAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> LDAPAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
std::scoped_lock lock(mutex);
|
||||
return memory_storage.findAll(type);
|
||||
@ -482,7 +482,7 @@ scope_guard LDAPAccessStorage::subscribeForChangesImpl(const UUID & id, const On
|
||||
}
|
||||
|
||||
|
||||
scope_guard LDAPAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard LDAPAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::scoped_lock lock(mutex);
|
||||
return memory_storage.subscribeForChanges(type, handler);
|
||||
@ -496,7 +496,7 @@ bool LDAPAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
bool LDAPAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool LDAPAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
std::scoped_lock lock(mutex);
|
||||
return memory_storage.hasSubscription(type);
|
||||
|
@ -42,8 +42,8 @@ public: // IAccessStorage implementations.
|
||||
virtual String getStorageParamsJSON() const override;
|
||||
|
||||
private: // IAccessStorage implementations.
|
||||
virtual std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
virtual std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
virtual std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
virtual std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
virtual bool existsImpl(const UUID & id) const override;
|
||||
virtual AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
virtual String readNameImpl(const UUID & id) const override;
|
||||
@ -52,9 +52,9 @@ private: // IAccessStorage implementations.
|
||||
virtual void removeImpl(const UUID & id) override;
|
||||
virtual void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
virtual scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
virtual scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
virtual scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
virtual bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
virtual bool hasSubscriptionImpl(EntityType type) const override;
|
||||
virtual bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
virtual UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override;
|
||||
virtual UUID getIDOfLoggedUserImpl(const String & user_name) const override;
|
||||
|
||||
|
@ -13,7 +13,7 @@ MemoryAccessStorage::MemoryAccessStorage(const String & storage_name_)
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> MemoryAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> MemoryAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
@ -26,7 +26,7 @@ std::optional<UUID> MemoryAccessStorage::findImpl(EntityType type, const String
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> MemoryAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> MemoryAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::vector<UUID> result;
|
||||
@ -77,7 +77,7 @@ UUID MemoryAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool re
|
||||
void MemoryAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications)
|
||||
{
|
||||
const String & name = new_entity->getName();
|
||||
EntityType type = new_entity->getType();
|
||||
AccessEntityType type = new_entity->getType();
|
||||
|
||||
/// Check that we can insert.
|
||||
auto it = entries_by_id.find(id);
|
||||
@ -125,7 +125,7 @@ void MemoryAccessStorage::removeNoLock(const UUID & id, Notifications & notifica
|
||||
|
||||
Entry & entry = it->second;
|
||||
const String & name = entry.entity->getName();
|
||||
EntityType type = entry.entity->getType();
|
||||
AccessEntityType type = entry.entity->getType();
|
||||
|
||||
prepareNotifications(entry, true, notifications);
|
||||
|
||||
@ -266,7 +266,7 @@ void MemoryAccessStorage::prepareNotifications(const Entry & entry, bool remove,
|
||||
}
|
||||
|
||||
|
||||
scope_guard MemoryAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard MemoryAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
@ -317,7 +317,7 @@ bool MemoryAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
bool MemoryAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool MemoryAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
|
@ -24,8 +24,8 @@ public:
|
||||
void setAll(const std::vector<std::pair<UUID, AccessEntityPtr>> & all_entities);
|
||||
|
||||
private:
|
||||
std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
@ -34,9 +34,9 @@ private:
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
bool hasSubscriptionImpl(EntityType type) const override;
|
||||
bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
|
||||
struct Entry
|
||||
{
|
||||
@ -53,7 +53,7 @@ private:
|
||||
|
||||
mutable std::recursive_mutex mutex;
|
||||
std::unordered_map<UUID, Entry> entries_by_id; /// We want to search entries both by ID and by the pair of name and type.
|
||||
std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
|
||||
std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
};
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include <Access/MultipleAccessStorage.h>
|
||||
#include <Access/Credentials.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <base/range.h>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/adaptor/reversed.hpp>
|
||||
@ -98,7 +99,7 @@ std::shared_ptr<const Storages> MultipleAccessStorage::getStoragesInternal() con
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> MultipleAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> MultipleAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
auto storages = getStoragesInternal();
|
||||
for (const auto & storage : *storages)
|
||||
@ -115,7 +116,7 @@ std::optional<UUID> MultipleAccessStorage::findImpl(EntityType type, const Strin
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> MultipleAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> MultipleAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
std::vector<UUID> all_ids;
|
||||
auto storages = getStoragesInternal();
|
||||
@ -219,7 +220,7 @@ UUID MultipleAccessStorage::insertImpl(const AccessEntityPtr & entity, bool repl
|
||||
}
|
||||
|
||||
if (!storage_for_insertion)
|
||||
throw Exception("Not found a storage to insert " + entity->outputTypeAndName(), ErrorCodes::ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND);
|
||||
throw Exception("Not found a storage to insert " + entity->formatTypeWithName(), ErrorCodes::ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND);
|
||||
|
||||
auto id = replace_if_exists ? storage_for_insertion->insertOrReplace(entity) : storage_for_insertion->insert(entity);
|
||||
std::lock_guard lock{mutex};
|
||||
@ -253,8 +254,8 @@ void MultipleAccessStorage::updateImpl(const UUID & id, const UpdateFunc & updat
|
||||
if (storage->find(new_entity->getType(), new_entity->getName()))
|
||||
{
|
||||
throw Exception(
|
||||
old_entity->outputTypeAndName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because "
|
||||
+ new_entity->outputTypeAndName() + " already exists in " + storage->getStorageName(),
|
||||
old_entity->formatTypeWithName() + ": cannot rename to " + backQuote(new_entity->getName()) + " because "
|
||||
+ new_entity->formatTypeWithName() + " already exists in " + storage->getStorageName(),
|
||||
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
|
||||
}
|
||||
}
|
||||
@ -286,7 +287,7 @@ bool MultipleAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
scope_guard MultipleAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard MultipleAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::unique_lock lock{mutex};
|
||||
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
@ -306,7 +307,7 @@ scope_guard MultipleAccessStorage::subscribeForChangesImpl(EntityType type, cons
|
||||
}
|
||||
|
||||
|
||||
bool MultipleAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool MultipleAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
@ -321,10 +322,10 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
|
||||
{
|
||||
/// lock is already locked.
|
||||
|
||||
std::vector<std::pair<StoragePtr, scope_guard>> added_subscriptions[static_cast<size_t>(EntityType::MAX)];
|
||||
std::vector<std::pair<StoragePtr, scope_guard>> added_subscriptions[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
std::vector<scope_guard> removed_subscriptions;
|
||||
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
{
|
||||
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
auto & subscriptions = subscriptions_to_nested_storages[static_cast<size_t>(type)];
|
||||
@ -364,7 +365,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
|
||||
lock.unlock();
|
||||
removed_subscriptions.clear();
|
||||
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
{
|
||||
if (!added_subscriptions[static_cast<size_t>(type)].empty())
|
||||
{
|
||||
@ -384,7 +385,7 @@ void MultipleAccessStorage::updateSubscriptionsToNestedStorages(std::unique_lock
|
||||
/// Lock the mutex again to store added subscriptions to the nested storages.
|
||||
lock.lock();
|
||||
|
||||
for (auto type : collections::range(EntityType::MAX))
|
||||
for (auto type : collections::range(AccessEntityType::MAX))
|
||||
{
|
||||
if (!added_subscriptions[static_cast<size_t>(type)].empty())
|
||||
{
|
||||
@ -418,7 +419,7 @@ UUID MultipleAccessStorage::loginImpl(const Credentials & credentials, const Poc
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (!storage->find(EntityType::USER, credentials.getUserName()))
|
||||
if (!storage->find(AccessEntityType::USER, credentials.getUserName()))
|
||||
{
|
||||
/// The authentication failed because there no users with such name in the `storage`
|
||||
/// thus we can try to search in other nested storages.
|
||||
@ -427,7 +428,7 @@ UUID MultipleAccessStorage::loginImpl(const Credentials & credentials, const Poc
|
||||
throw;
|
||||
}
|
||||
}
|
||||
throwNotFound(EntityType::USER, credentials.getUserName());
|
||||
throwNotFound(AccessEntityType::USER, credentials.getUserName());
|
||||
}
|
||||
|
||||
|
||||
@ -445,7 +446,7 @@ UUID MultipleAccessStorage::getIDOfLoggedUserImpl(const String & user_name) cons
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
if (!storage->find(EntityType::USER, user_name))
|
||||
if (!storage->find(AccessEntityType::USER, user_name))
|
||||
{
|
||||
/// The authentication failed because there no users with such name in the `storage`
|
||||
/// thus we can try to search in other nested storages.
|
||||
@ -454,7 +455,7 @@ UUID MultipleAccessStorage::getIDOfLoggedUserImpl(const String & user_name) cons
|
||||
throw;
|
||||
}
|
||||
}
|
||||
throwNotFound(EntityType::USER, user_name);
|
||||
throwNotFound(AccessEntityType::USER, user_name);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -35,8 +35,8 @@ public:
|
||||
StoragePtr getStorage(const UUID & id);
|
||||
|
||||
protected:
|
||||
std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID &id) const override;
|
||||
@ -45,9 +45,9 @@ protected:
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
bool hasSubscriptionImpl(EntityType type) const override;
|
||||
bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
UUID loginImpl(const Credentials & credentials, const Poco::Net::IPAddress & address, const ExternalAuthenticators & external_authenticators) const override;
|
||||
UUID getIDOfLoggedUserImpl(const String & user_name) const override;
|
||||
|
||||
@ -58,8 +58,8 @@ private:
|
||||
|
||||
std::shared_ptr<const Storages> nested_storages;
|
||||
mutable LRUCache<UUID, Storage> ids_cache;
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
|
||||
mutable std::unordered_map<StoragePtr, scope_guard> subscriptions_to_nested_storages[static_cast<size_t>(EntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
mutable std::unordered_map<StoragePtr, scope_guard> subscriptions_to_nested_storages[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
mutable std::mutex mutex;
|
||||
};
|
||||
|
||||
|
@ -1,20 +1,13 @@
|
||||
#pragma once
|
||||
|
||||
#include <Access/IAccessEntity.h>
|
||||
#include <Access/Common/QuotaDefs.h>
|
||||
#include <Access/RolesOrUsersSet.h>
|
||||
#include <base/range.h>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <chrono>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/** Quota for resources consumption for specific interval.
|
||||
* Used to limit resource usage by user.
|
||||
@ -26,40 +19,10 @@ namespace ErrorCodes
|
||||
*/
|
||||
struct Quota : public IAccessEntity
|
||||
{
|
||||
using ResourceAmount = UInt64;
|
||||
|
||||
enum ResourceType
|
||||
{
|
||||
QUERIES, /// Number of queries.
|
||||
QUERY_SELECTS, /// Number of select queries.
|
||||
QUERY_INSERTS, /// Number of inserts queries.
|
||||
ERRORS, /// Number of queries with exceptions.
|
||||
RESULT_ROWS, /// Number of rows returned as result.
|
||||
RESULT_BYTES, /// Number of bytes returned as result.
|
||||
READ_ROWS, /// Number of rows read from tables.
|
||||
READ_BYTES, /// Number of bytes read from tables.
|
||||
EXECUTION_TIME, /// Total amount of query execution time in nanoseconds.
|
||||
|
||||
MAX_RESOURCE_TYPE
|
||||
};
|
||||
|
||||
struct ResourceTypeInfo
|
||||
{
|
||||
const char * const raw_name = "";
|
||||
const String name; /// Lowercased with underscores, e.g. "result_rows".
|
||||
const String keyword; /// Uppercased with spaces, e.g. "RESULT ROWS".
|
||||
const bool output_as_float = false;
|
||||
const UInt64 output_denominator = 1;
|
||||
String amountToString(ResourceAmount amount) const;
|
||||
ResourceAmount amountFromString(const String & str) const;
|
||||
String outputWithAmount(ResourceAmount amount) const;
|
||||
static const ResourceTypeInfo & get(ResourceType type);
|
||||
};
|
||||
|
||||
/// Amount of resources available to consume for each duration.
|
||||
struct Limits
|
||||
{
|
||||
std::optional<ResourceAmount> max[MAX_RESOURCE_TYPE];
|
||||
std::optional<QuotaValue> max[static_cast<size_t>(QuotaType::MAX)];
|
||||
std::chrono::seconds duration = std::chrono::seconds::zero();
|
||||
|
||||
/// Intervals can be randomized (to avoid DoS if intervals for many users end at one time).
|
||||
@ -73,206 +36,16 @@ struct Quota : public IAccessEntity
|
||||
|
||||
/// Key to share quota consumption.
|
||||
/// Users with the same key share the same amount of resource.
|
||||
enum class KeyType
|
||||
{
|
||||
NONE, /// All users share the same quota.
|
||||
USER_NAME, /// Connections with the same user name share the same quota.
|
||||
IP_ADDRESS, /// Connections from the same IP share the same quota.
|
||||
FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address.
|
||||
CLIENT_KEY, /// Client should explicitly supply a key to use.
|
||||
CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key.
|
||||
CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key.
|
||||
|
||||
MAX
|
||||
};
|
||||
|
||||
struct KeyTypeInfo
|
||||
{
|
||||
const char * const raw_name;
|
||||
const String name; /// Lowercased with underscores, e.g. "client_key".
|
||||
const std::vector<KeyType> base_types; /// For combined types keeps base types, e.g. for CLIENT_KEY_OR_USER_NAME it keeps [KeyType::CLIENT_KEY, KeyType::USER_NAME].
|
||||
static const KeyTypeInfo & get(KeyType type);
|
||||
};
|
||||
|
||||
KeyType key_type = KeyType::NONE;
|
||||
QuotaKeyType key_type = QuotaKeyType::NONE;
|
||||
|
||||
/// Which roles or users should use this quota.
|
||||
RolesOrUsersSet to_roles;
|
||||
|
||||
bool equal(const IAccessEntity & other) const override;
|
||||
std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<Quota>(); }
|
||||
static constexpr const Type TYPE = Type::QUOTA;
|
||||
Type getType() const override { return TYPE; }
|
||||
static constexpr const auto TYPE = AccessEntityType::QUOTA;
|
||||
AccessEntityType getType() const override { return TYPE; }
|
||||
};
|
||||
|
||||
|
||||
inline String Quota::ResourceTypeInfo::amountToString(ResourceAmount amount) const
|
||||
{
|
||||
if (!(amount % output_denominator))
|
||||
return std::to_string(amount / output_denominator);
|
||||
else
|
||||
return boost::lexical_cast<std::string>(static_cast<double>(amount) / output_denominator);
|
||||
}
|
||||
|
||||
inline Quota::ResourceAmount Quota::ResourceTypeInfo::amountFromString(const String & str) const
|
||||
{
|
||||
if (output_denominator == 1)
|
||||
return static_cast<ResourceAmount>(std::strtoul(str.c_str(), nullptr, 10));
|
||||
else
|
||||
return static_cast<ResourceAmount>(std::strtod(str.c_str(), nullptr) * output_denominator);
|
||||
}
|
||||
|
||||
inline String Quota::ResourceTypeInfo::outputWithAmount(ResourceAmount amount) const
|
||||
{
|
||||
String res = name;
|
||||
res += " = ";
|
||||
res += amountToString(amount);
|
||||
return res;
|
||||
}
|
||||
|
||||
inline String toString(Quota::ResourceType type)
|
||||
{
|
||||
return Quota::ResourceTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
inline const Quota::ResourceTypeInfo & Quota::ResourceTypeInfo::get(ResourceType type)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_, UInt64 output_denominator_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
String init_keyword = raw_name_;
|
||||
boost::replace_all(init_keyword, "_", " ");
|
||||
bool init_output_as_float = (output_denominator_ != 1);
|
||||
return ResourceTypeInfo{raw_name_, std::move(init_name), std::move(init_keyword), init_output_as_float, output_denominator_};
|
||||
};
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case Quota::QUERIES:
|
||||
{
|
||||
static const auto info = make_info("QUERIES", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::QUERY_SELECTS:
|
||||
{
|
||||
static const auto info = make_info("QUERY_SELECTS", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::QUERY_INSERTS:
|
||||
{
|
||||
static const auto info = make_info("QUERY_INSERTS", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::ERRORS:
|
||||
{
|
||||
static const auto info = make_info("ERRORS", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::RESULT_ROWS:
|
||||
{
|
||||
static const auto info = make_info("RESULT_ROWS", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::RESULT_BYTES:
|
||||
{
|
||||
static const auto info = make_info("RESULT_BYTES", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::READ_ROWS:
|
||||
{
|
||||
static const auto info = make_info("READ_ROWS", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::READ_BYTES:
|
||||
{
|
||||
static const auto info = make_info("READ_BYTES", 1);
|
||||
return info;
|
||||
}
|
||||
case Quota::EXECUTION_TIME:
|
||||
{
|
||||
static const auto info = make_info("EXECUTION_TIME", 1000000000 /* execution_time is stored in nanoseconds */);
|
||||
return info;
|
||||
}
|
||||
case Quota::MAX_RESOURCE_TYPE: break;
|
||||
}
|
||||
throw Exception("Unexpected resource type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
|
||||
inline String toString(Quota::KeyType type)
|
||||
{
|
||||
return Quota::KeyTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
std::vector<KeyType> init_base_types;
|
||||
String replaced = boost::algorithm::replace_all_copy(init_name, "_or_", "|");
|
||||
Strings tokens;
|
||||
boost::algorithm::split(tokens, replaced, boost::is_any_of("|"));
|
||||
if (tokens.size() > 1)
|
||||
{
|
||||
for (const auto & token : tokens)
|
||||
{
|
||||
for (auto kt : collections::range(KeyType::MAX))
|
||||
{
|
||||
if (KeyTypeInfo::get(kt).name == token)
|
||||
{
|
||||
init_base_types.push_back(kt);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return KeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)};
|
||||
};
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case KeyType::NONE:
|
||||
{
|
||||
static const auto info = make_info("NONE");
|
||||
return info;
|
||||
}
|
||||
case KeyType::USER_NAME:
|
||||
{
|
||||
static const auto info = make_info("USER_NAME");
|
||||
return info;
|
||||
}
|
||||
case KeyType::IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case KeyType::FORWARDED_IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("FORWARDED_IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case KeyType::CLIENT_KEY:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY");
|
||||
return info;
|
||||
}
|
||||
case KeyType::CLIENT_KEY_OR_USER_NAME:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY_OR_USER_NAME");
|
||||
return info;
|
||||
}
|
||||
case KeyType::CLIENT_KEY_OR_IP_ADDRESS:
|
||||
{
|
||||
static const auto info = make_info("CLIENT_KEY_OR_IP_ADDRESS");
|
||||
return info;
|
||||
}
|
||||
case KeyType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
using QuotaPtr = std::shared_ptr<const Quota>;
|
||||
}
|
||||
|
@ -1,4 +1,5 @@
|
||||
#include <Access/EnabledQuota.h>
|
||||
#include <Access/Quota.h>
|
||||
#include <Access/QuotaCache.h>
|
||||
#include <Access/QuotaUsage.h>
|
||||
#include <Access/AccessControl.h>
|
||||
@ -44,26 +45,25 @@ void QuotaCache::QuotaInfo::setQuota(const QuotaPtr & quota_, const UUID & quota
|
||||
String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
|
||||
{
|
||||
const auto & params = enabled.params;
|
||||
using KeyType = Quota::KeyType;
|
||||
switch (quota->key_type)
|
||||
{
|
||||
case KeyType::NONE:
|
||||
case QuotaKeyType::NONE:
|
||||
{
|
||||
return "";
|
||||
}
|
||||
case KeyType::USER_NAME:
|
||||
case QuotaKeyType::USER_NAME:
|
||||
{
|
||||
return params.user_name;
|
||||
}
|
||||
case KeyType::IP_ADDRESS:
|
||||
case QuotaKeyType::IP_ADDRESS:
|
||||
{
|
||||
return params.client_address.toString();
|
||||
}
|
||||
case KeyType::FORWARDED_IP_ADDRESS:
|
||||
case QuotaKeyType::FORWARDED_IP_ADDRESS:
|
||||
{
|
||||
return params.forwarded_address;
|
||||
}
|
||||
case KeyType::CLIENT_KEY:
|
||||
case QuotaKeyType::CLIENT_KEY:
|
||||
{
|
||||
if (!params.client_key.empty())
|
||||
return params.client_key;
|
||||
@ -71,19 +71,19 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
|
||||
"Quota " + quota->getName() + " (for user " + params.user_name + ") requires a client supplied key.",
|
||||
ErrorCodes::QUOTA_REQUIRES_CLIENT_KEY);
|
||||
}
|
||||
case KeyType::CLIENT_KEY_OR_USER_NAME:
|
||||
case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:
|
||||
{
|
||||
if (!params.client_key.empty())
|
||||
return params.client_key;
|
||||
return params.user_name;
|
||||
}
|
||||
case KeyType::CLIENT_KEY_OR_IP_ADDRESS:
|
||||
case QuotaKeyType::CLIENT_KEY_OR_IP_ADDRESS:
|
||||
{
|
||||
if (!params.client_key.empty())
|
||||
return params.client_key;
|
||||
return params.client_address.toString();
|
||||
}
|
||||
case KeyType::MAX: break;
|
||||
case QuotaKeyType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(quota->key_type)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
@ -113,7 +113,6 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI
|
||||
new_intervals->quota_key = key;
|
||||
auto & intervals = new_intervals->intervals;
|
||||
intervals.reserve(quota->all_limits.size());
|
||||
static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE;
|
||||
for (const auto & limits : quota->all_limits)
|
||||
{
|
||||
intervals.emplace_back();
|
||||
@ -124,11 +123,12 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI
|
||||
if (limits.randomize_interval)
|
||||
end_of_interval += randomDuration(limits.duration);
|
||||
interval.end_of_interval = end_of_interval.time_since_epoch();
|
||||
for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
if (limits.max[resource_type])
|
||||
interval.max[resource_type] = *limits.max[resource_type];
|
||||
interval.used[resource_type] = 0;
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
if (limits.max[quota_type_i])
|
||||
interval.max[quota_type_i] = *limits.max[quota_type_i];
|
||||
interval.used[quota_type_i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -159,9 +159,10 @@ boost::shared_ptr<const EnabledQuota::Intervals> QuotaCache::QuotaInfo::rebuildI
|
||||
|
||||
/// Found an interval with the same duration, we need to copy its usage information to `result`.
|
||||
const auto & current_interval = *lower_bound;
|
||||
for (auto resource_type : collections::range(MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
new_interval.used[resource_type].store(current_interval.used[resource_type].load());
|
||||
auto quota_type_i = static_cast<size_t>(quota_type);
|
||||
new_interval.used[quota_type_i].store(current_interval.used[quota_type_i].load());
|
||||
new_interval.end_of_interval.store(current_interval.end_of_interval.load());
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,9 @@
|
||||
namespace DB
|
||||
{
|
||||
class AccessControl;
|
||||
|
||||
struct Quota;
|
||||
using QuotaPtr = std::shared_ptr<const Quota>;
|
||||
struct RolesOrUsersSet;
|
||||
|
||||
/// Stores information how much amount of resources have been consumed and how much are left.
|
||||
class QuotaCache
|
||||
|
@ -1,7 +1,8 @@
|
||||
#pragma once
|
||||
|
||||
#include <Access/Quota.h>
|
||||
#include <Access/Common/QuotaDefs.h>
|
||||
#include <chrono>
|
||||
#include <optional>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -9,14 +10,10 @@ namespace DB
|
||||
/// The information about a quota consumption.
|
||||
struct QuotaUsage
|
||||
{
|
||||
using ResourceType = Quota::ResourceType;
|
||||
using ResourceAmount = Quota::ResourceAmount;
|
||||
static constexpr auto MAX_RESOURCE_TYPE = Quota::MAX_RESOURCE_TYPE;
|
||||
|
||||
struct Interval
|
||||
{
|
||||
ResourceAmount used[MAX_RESOURCE_TYPE];
|
||||
std::optional<ResourceAmount> max[MAX_RESOURCE_TYPE];
|
||||
QuotaValue used[static_cast<size_t>(QuotaType::MAX)];
|
||||
std::optional<QuotaValue> max[static_cast<size_t>(QuotaType::MAX)];
|
||||
std::chrono::seconds duration = std::chrono::seconds::zero();
|
||||
bool randomize_interval = false;
|
||||
std::chrono::system_clock::time_point end_of_interval;
|
||||
|
@ -94,7 +94,7 @@ static void retryOnZooKeeperUserError(size_t attempts, Func && function)
|
||||
UUID ReplicatedAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists)
|
||||
{
|
||||
const UUID id = generateRandomID();
|
||||
const EntityTypeInfo type_info = EntityTypeInfo::get(new_entity->getType());
|
||||
const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType());
|
||||
const String & name = new_entity->getName();
|
||||
LOG_DEBUG(getLogger(), "Inserting entity of type {} named {} with id {}", type_info.name, name, toString(id));
|
||||
|
||||
@ -113,8 +113,8 @@ void ReplicatedAccessStorage::insertZooKeeper(
|
||||
const zkutil::ZooKeeperPtr & zookeeper, const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists)
|
||||
{
|
||||
const String & name = new_entity->getName();
|
||||
const EntityType type = new_entity->getType();
|
||||
const EntityTypeInfo type_info = EntityTypeInfo::get(type);
|
||||
const AccessEntityType type = new_entity->getType();
|
||||
const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(type);
|
||||
|
||||
const String entity_uuid = toString(id);
|
||||
/// The entity data will be stored here, this ensures all entities have unique ids
|
||||
@ -143,7 +143,7 @@ void ReplicatedAccessStorage::insertZooKeeper(
|
||||
String existing_entity_definition = zookeeper->get(entity_path);
|
||||
|
||||
AccessEntityPtr existing_entity = deserializeAccessEntity(existing_entity_definition, entity_path);
|
||||
EntityType existing_type = existing_entity->getType();
|
||||
AccessEntityType existing_type = existing_entity->getType();
|
||||
String existing_name = existing_entity->getName();
|
||||
throwIDCollisionCannotInsert(id, type, name, existing_type, existing_name);
|
||||
}
|
||||
@ -204,7 +204,7 @@ void ReplicatedAccessStorage::removeZooKeeper(const zkutil::ZooKeeperPtr & zooke
|
||||
throwNotFound(id);
|
||||
|
||||
const AccessEntityPtr entity = deserializeAccessEntity(entity_definition, entity_path);
|
||||
const EntityTypeInfo type_info = EntityTypeInfo::get(entity->getType());
|
||||
const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(entity->getType());
|
||||
const String & name = entity->getName();
|
||||
|
||||
const String entity_name_path = zookeeper_path + "/" + type_info.unique_char + "/" + escapeForFileName(name);
|
||||
@ -248,7 +248,7 @@ void ReplicatedAccessStorage::updateZooKeeper(const zkutil::ZooKeeperPtr & zooke
|
||||
if (!new_entity->isTypeOf(old_entity->getType()))
|
||||
throwBadCast(id, new_entity->getType(), new_entity->getName(), old_entity->getType());
|
||||
|
||||
const EntityTypeInfo type_info = EntityTypeInfo::get(new_entity->getType());
|
||||
const AccessEntityTypeInfo type_info = AccessEntityTypeInfo::get(new_entity->getType());
|
||||
|
||||
Coordination::Requests ops;
|
||||
const String new_entity_definition = serializeAccessEntity(*new_entity);
|
||||
@ -309,7 +309,7 @@ void ReplicatedAccessStorage::resetAfterError()
|
||||
while (refresh_queue.tryPop(id)) {}
|
||||
|
||||
std::lock_guard lock{mutex};
|
||||
for (const auto type : collections::range(EntityType::MAX))
|
||||
for (const auto type : collections::range(AccessEntityType::MAX))
|
||||
entries_by_name_and_type[static_cast<size_t>(type)].clear();
|
||||
entries_by_id.clear();
|
||||
}
|
||||
@ -334,10 +334,10 @@ void ReplicatedAccessStorage::createRootNodes(const zkutil::ZooKeeperPtr & zooke
|
||||
zookeeper->createAncestors(zookeeper_path);
|
||||
zookeeper->createIfNotExists(zookeeper_path, "");
|
||||
zookeeper->createIfNotExists(zookeeper_path + "/uuid", "");
|
||||
for (const auto type : collections::range(EntityType::MAX))
|
||||
for (const auto type : collections::range(AccessEntityType::MAX))
|
||||
{
|
||||
/// Create a znode for each type of AccessEntity
|
||||
const auto type_info = EntityTypeInfo::get(type);
|
||||
const auto type_info = AccessEntityTypeInfo::get(type);
|
||||
zookeeper->createIfNotExists(zookeeper_path + "/" + type_info.unique_char, "");
|
||||
}
|
||||
}
|
||||
@ -440,7 +440,7 @@ void ReplicatedAccessStorage::refreshEntityNoLock(const zkutil::ZooKeeperPtr & z
|
||||
void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntityPtr & entity, Notifications & notifications)
|
||||
{
|
||||
LOG_DEBUG(getLogger(), "Setting id {} to entity named {}", toString(id), entity->getName());
|
||||
const EntityType type = entity->getType();
|
||||
const AccessEntityType type = entity->getType();
|
||||
const String & name = entity->getName();
|
||||
|
||||
/// If the type+name already exists and is a different entity, remove old entity
|
||||
@ -454,7 +454,7 @@ void ReplicatedAccessStorage::setEntityNoLock(const UUID & id, const AccessEntit
|
||||
if (auto it = entries_by_id.find(id); it != entries_by_id.end())
|
||||
{
|
||||
const AccessEntityPtr & existing_entity = it->second.entity;
|
||||
const EntityType existing_type = existing_entity->getType();
|
||||
const AccessEntityType existing_type = existing_entity->getType();
|
||||
const String & existing_name = existing_entity->getName();
|
||||
if (existing_type != type || existing_name != name)
|
||||
{
|
||||
@ -482,7 +482,7 @@ void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id, Notifications
|
||||
}
|
||||
|
||||
const Entry & entry = it->second;
|
||||
const EntityType type = entry.entity->getType();
|
||||
const AccessEntityType type = entry.entity->getType();
|
||||
const String & name = entry.entity->getName();
|
||||
prepareNotifications(entry, true, notifications);
|
||||
|
||||
@ -500,7 +500,7 @@ void ReplicatedAccessStorage::removeEntityNoLock(const UUID & id, Notifications
|
||||
}
|
||||
|
||||
|
||||
std::optional<UUID> ReplicatedAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> ReplicatedAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
|
||||
@ -513,7 +513,7 @@ std::optional<UUID> ReplicatedAccessStorage::findImpl(EntityType type, const Str
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> ReplicatedAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> ReplicatedAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
std::vector<UUID> result;
|
||||
@ -560,7 +560,7 @@ void ReplicatedAccessStorage::prepareNotifications(const Entry & entry, bool rem
|
||||
}
|
||||
|
||||
|
||||
scope_guard ReplicatedAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard ReplicatedAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
@ -611,7 +611,7 @@ bool ReplicatedAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
bool ReplicatedAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool ReplicatedAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
const auto & handlers = handlers_by_type[static_cast<size_t>(type)];
|
||||
|
@ -69,8 +69,8 @@ private:
|
||||
mutable std::list<OnChangedHandler> handlers_by_id;
|
||||
};
|
||||
|
||||
std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
@ -78,13 +78,13 @@ private:
|
||||
|
||||
void prepareNotifications(const Entry & entry, bool remove, Notifications & notifications) const;
|
||||
scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
bool hasSubscriptionImpl(EntityType type) const override;
|
||||
bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
|
||||
mutable std::mutex mutex;
|
||||
std::unordered_map<UUID, Entry> entries_by_id;
|
||||
std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(EntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(EntityType::MAX)];
|
||||
std::unordered_map<String, Entry *> entries_by_name_and_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
mutable std::list<OnChangedHandler> handlers_by_type[static_cast<size_t>(AccessEntityType::MAX)];
|
||||
};
|
||||
}
|
||||
|
@ -17,8 +17,8 @@ struct Role : public IAccessEntity
|
||||
|
||||
bool equal(const IAccessEntity & other) const override;
|
||||
std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<Role>(); }
|
||||
static constexpr const Type TYPE = Type::ROLE;
|
||||
Type getType() const override { return TYPE; }
|
||||
static constexpr const auto TYPE = AccessEntityType::ROLE;
|
||||
AccessEntityType getType() const override { return TYPE; }
|
||||
};
|
||||
|
||||
using RolePtr = std::shared_ptr<const Role>;
|
||||
|
@ -13,34 +13,34 @@ namespace ErrorCodes
|
||||
|
||||
void RowPolicy::setDatabase(const String & database)
|
||||
{
|
||||
name_parts.database = database;
|
||||
IAccessEntity::setName(name_parts.getName());
|
||||
full_name.database = database;
|
||||
IAccessEntity::setName(full_name.toString());
|
||||
}
|
||||
|
||||
void RowPolicy::setTableName(const String & table_name)
|
||||
{
|
||||
name_parts.table_name = table_name;
|
||||
IAccessEntity::setName(name_parts.getName());
|
||||
full_name.table_name = table_name;
|
||||
IAccessEntity::setName(full_name.toString());
|
||||
}
|
||||
|
||||
void RowPolicy::setShortName(const String & short_name)
|
||||
{
|
||||
name_parts.short_name = short_name;
|
||||
IAccessEntity::setName(name_parts.getName());
|
||||
full_name.short_name = short_name;
|
||||
IAccessEntity::setName(full_name.toString());
|
||||
}
|
||||
|
||||
void RowPolicy::setNameParts(const String & short_name, const String & database, const String & table_name)
|
||||
void RowPolicy::setFullName(const String & short_name, const String & database, const String & table_name)
|
||||
{
|
||||
name_parts.short_name = short_name;
|
||||
name_parts.database = database;
|
||||
name_parts.table_name = table_name;
|
||||
IAccessEntity::setName(name_parts.getName());
|
||||
full_name.short_name = short_name;
|
||||
full_name.database = database;
|
||||
full_name.table_name = table_name;
|
||||
IAccessEntity::setName(full_name.toString());
|
||||
}
|
||||
|
||||
void RowPolicy::setNameParts(const NameParts & name_parts_)
|
||||
void RowPolicy::setFullName(const RowPolicyName & full_name_)
|
||||
{
|
||||
name_parts = name_parts_;
|
||||
IAccessEntity::setName(name_parts.getName());
|
||||
full_name = full_name_;
|
||||
IAccessEntity::setName(full_name.toString());
|
||||
}
|
||||
|
||||
void RowPolicy::setName(const String &)
|
||||
@ -54,7 +54,7 @@ bool RowPolicy::equal(const IAccessEntity & other) const
|
||||
if (!IAccessEntity::equal(other))
|
||||
return false;
|
||||
const auto & other_policy = typeid_cast<const RowPolicy &>(other);
|
||||
return (name_parts == other_policy.name_parts) && boost::range::equal(conditions, other_policy.conditions)
|
||||
return (full_name == other_policy.full_name) && boost::range::equal(filters, other_policy.filters)
|
||||
&& restrictive == other_policy.restrictive && (to_roles == other_policy.to_roles);
|
||||
}
|
||||
|
||||
|
@ -2,77 +2,32 @@
|
||||
|
||||
#include <Access/IAccessEntity.h>
|
||||
#include <Access/RolesOrUsersSet.h>
|
||||
#include <Access/Common/RowPolicyDefs.h>
|
||||
#include <Core/Types.h>
|
||||
#include <array>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
|
||||
/** Represents a row level security policy for a table.
|
||||
*/
|
||||
struct RowPolicy : public IAccessEntity
|
||||
{
|
||||
struct NameParts
|
||||
{
|
||||
String short_name;
|
||||
String database;
|
||||
String table_name;
|
||||
|
||||
bool empty() const { return short_name.empty(); }
|
||||
String getName() const;
|
||||
String toString() const { return getName(); }
|
||||
auto toTuple() const { return std::tie(short_name, database, table_name); }
|
||||
friend bool operator ==(const NameParts & left, const NameParts & right) { return left.toTuple() == right.toTuple(); }
|
||||
friend bool operator !=(const NameParts & left, const NameParts & right) { return left.toTuple() != right.toTuple(); }
|
||||
};
|
||||
|
||||
void setShortName(const String & short_name);
|
||||
void setDatabase(const String & database);
|
||||
void setTableName(const String & table_name);
|
||||
void setNameParts(const String & short_name, const String & database, const String & table_name);
|
||||
void setNameParts(const NameParts & name_parts);
|
||||
void setFullName(const String & short_name, const String & database, const String & table_name);
|
||||
void setFullName(const RowPolicyName & full_name_);
|
||||
|
||||
const String & getDatabase() const { return name_parts.database; }
|
||||
const String & getTableName() const { return name_parts.table_name; }
|
||||
const String & getShortName() const { return name_parts.short_name; }
|
||||
const NameParts & getNameParts() const { return name_parts; }
|
||||
const String & getDatabase() const { return full_name.database; }
|
||||
const String & getTableName() const { return full_name.table_name; }
|
||||
const String & getShortName() const { return full_name.short_name; }
|
||||
const RowPolicyName & getFullName() const { return full_name; }
|
||||
|
||||
/// Filter is a SQL conditional expression used to figure out which rows should be visible
|
||||
/// for user or available for modification. If the expression returns NULL or false for some rows
|
||||
/// those rows are silently suppressed.
|
||||
/// Check is a SQL condition expression used to check whether a row can be written into
|
||||
/// the table. If the expression returns NULL or false an exception is thrown.
|
||||
/// If a conditional expression here is empty it means no filtering is applied.
|
||||
enum ConditionType
|
||||
{
|
||||
SELECT_FILTER,
|
||||
|
||||
#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet.
|
||||
INSERT_CHECK,
|
||||
UPDATE_FILTER,
|
||||
UPDATE_CHECK,
|
||||
DELETE_FILTER,
|
||||
#endif
|
||||
|
||||
MAX_CONDITION_TYPE
|
||||
};
|
||||
|
||||
struct ConditionTypeInfo
|
||||
{
|
||||
const char * const raw_name;
|
||||
const String name; /// Lowercased with underscores, e.g. "select_filter".
|
||||
const String command; /// Uppercased without last word, e.g. "SELECT".
|
||||
const bool is_check; /// E.g. false for SELECT_FILTER.
|
||||
static const ConditionTypeInfo & get(ConditionType type);
|
||||
};
|
||||
|
||||
std::array<String, MAX_CONDITION_TYPE> conditions;
|
||||
/// A SQL conditional expression used to figure out which rows should be visible
|
||||
/// for user or available for modification.
|
||||
std::array<String, static_cast<size_t>(RowPolicyFilterType::MAX)> filters;
|
||||
|
||||
/// Sets that the policy is permissive.
|
||||
/// A row is only accessible if at least one of the permissive policies passes,
|
||||
@ -88,88 +43,19 @@ struct RowPolicy : public IAccessEntity
|
||||
|
||||
bool equal(const IAccessEntity & other) const override;
|
||||
std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<RowPolicy>(); }
|
||||
static constexpr const Type TYPE = Type::ROW_POLICY;
|
||||
Type getType() const override { return TYPE; }
|
||||
static constexpr const auto TYPE = AccessEntityType::ROW_POLICY;
|
||||
AccessEntityType getType() const override { return TYPE; }
|
||||
|
||||
/// Which roles or users should use this row policy.
|
||||
RolesOrUsersSet to_roles;
|
||||
|
||||
private:
|
||||
void setName(const String & name_) override;
|
||||
void setName(const String &) override;
|
||||
|
||||
NameParts name_parts;
|
||||
RowPolicyName full_name;
|
||||
bool restrictive = false;
|
||||
};
|
||||
|
||||
using RowPolicyPtr = std::shared_ptr<const RowPolicy>;
|
||||
|
||||
|
||||
inline const RowPolicy::ConditionTypeInfo & RowPolicy::ConditionTypeInfo::get(ConditionType type_)
|
||||
{
|
||||
static constexpr auto make_info = [](const char * raw_name_)
|
||||
{
|
||||
String init_name = raw_name_;
|
||||
boost::to_lower(init_name);
|
||||
size_t underscore_pos = init_name.find('_');
|
||||
String init_command = init_name.substr(0, underscore_pos);
|
||||
boost::to_upper(init_command);
|
||||
bool init_is_check = (std::string_view{init_name}.substr(underscore_pos + 1) == "check");
|
||||
return ConditionTypeInfo{raw_name_, std::move(init_name), std::move(init_command), init_is_check};
|
||||
};
|
||||
|
||||
switch (type_)
|
||||
{
|
||||
case SELECT_FILTER:
|
||||
{
|
||||
static const ConditionTypeInfo info = make_info("SELECT_FILTER");
|
||||
return info;
|
||||
}
|
||||
#if 0 /// Row-level security for INSERT, UPDATE, DELETE is not implemented yet.
|
||||
case INSERT_CHECK:
|
||||
{
|
||||
static const ConditionTypeInfo info = make_info("INSERT_CHECK");
|
||||
return info;
|
||||
}
|
||||
case UPDATE_FILTER:
|
||||
{
|
||||
static const ConditionTypeInfo info = make_info("UPDATE_FILTER");
|
||||
return info;
|
||||
}
|
||||
case UPDATE_CHECK:
|
||||
{
|
||||
static const ConditionTypeInfo info = make_info("UPDATE_CHECK");
|
||||
return info;
|
||||
}
|
||||
case DELETE_FILTER:
|
||||
{
|
||||
static const ConditionTypeInfo info = make_info("DELETE_FILTER");
|
||||
return info;
|
||||
}
|
||||
#endif
|
||||
case MAX_CONDITION_TYPE: break;
|
||||
}
|
||||
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
|
||||
inline String toString(RowPolicy::ConditionType type)
|
||||
{
|
||||
return RowPolicy::ConditionTypeInfo::get(type).raw_name;
|
||||
}
|
||||
|
||||
|
||||
inline String RowPolicy::NameParts::getName() const
|
||||
{
|
||||
String name;
|
||||
name.reserve(database.length() + table_name.length() + short_name.length() + 6);
|
||||
name += backQuoteIfNeed(short_name);
|
||||
name += " ON ";
|
||||
if (!database.empty())
|
||||
{
|
||||
name += backQuoteIfNeed(database);
|
||||
name += '.';
|
||||
}
|
||||
name += backQuoteIfNeed(table_name);
|
||||
return name;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include <Access/RowPolicyCache.h>
|
||||
#include <Access/EnabledRowPolicies.h>
|
||||
#include <Access/AccessControl.h>
|
||||
#include <Access/EnabledRowPolicies.h>
|
||||
#include <Access/RowPolicy.h>
|
||||
#include <Parsers/ExpressionListParsers.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/makeASTForLogicalFunction.h>
|
||||
@ -15,35 +16,31 @@ namespace DB
|
||||
{
|
||||
namespace
|
||||
{
|
||||
using ConditionType = RowPolicy::ConditionType;
|
||||
constexpr auto MAX_CONDITION_TYPE = RowPolicy::MAX_CONDITION_TYPE;
|
||||
|
||||
|
||||
/// Accumulates conditions from multiple row policies and joins them using the AND logical operation.
|
||||
class ConditionsMixer
|
||||
/// Accumulates filters from multiple row policies and joins them using the AND logical operation.
|
||||
class FiltersMixer
|
||||
{
|
||||
public:
|
||||
void add(const ASTPtr & condition, bool is_restrictive)
|
||||
void add(const ASTPtr & filter, bool is_restrictive)
|
||||
{
|
||||
if (is_restrictive)
|
||||
restrictions.push_back(condition);
|
||||
restrictions.push_back(filter);
|
||||
else
|
||||
permissions.push_back(condition);
|
||||
permissions.push_back(filter);
|
||||
}
|
||||
|
||||
ASTPtr getResult() &&
|
||||
{
|
||||
/// Process permissive conditions.
|
||||
/// Process permissive filters.
|
||||
restrictions.push_back(makeASTForLogicalOr(std::move(permissions)));
|
||||
|
||||
/// Process restrictive conditions.
|
||||
auto condition = makeASTForLogicalAnd(std::move(restrictions));
|
||||
/// Process restrictive filters.
|
||||
auto result = makeASTForLogicalAnd(std::move(restrictions));
|
||||
|
||||
bool value;
|
||||
if (tryGetLiteralBool(condition.get(), value) && value)
|
||||
condition = nullptr; /// The condition is always true, no need to check it.
|
||||
if (tryGetLiteralBool(result.get(), value) && value)
|
||||
result = nullptr; /// The condition is always true, no need to check it.
|
||||
|
||||
return condition;
|
||||
return result;
|
||||
}
|
||||
|
||||
private:
|
||||
@ -59,33 +56,34 @@ void RowPolicyCache::PolicyInfo::setPolicy(const RowPolicyPtr & policy_)
|
||||
roles = &policy->to_roles;
|
||||
database_and_table_name = std::make_shared<std::pair<String, String>>(policy->getDatabase(), policy->getTableName());
|
||||
|
||||
for (auto type : collections::range(0, MAX_CONDITION_TYPE))
|
||||
for (auto filter_type : collections::range(0, RowPolicyFilterType::MAX))
|
||||
{
|
||||
parsed_conditions[type] = nullptr;
|
||||
const String & condition = policy->conditions[type];
|
||||
if (condition.empty())
|
||||
auto filter_type_i = static_cast<size_t>(filter_type);
|
||||
parsed_filters[filter_type_i] = nullptr;
|
||||
const String & filter = policy->filters[filter_type_i];
|
||||
if (filter.empty())
|
||||
continue;
|
||||
|
||||
auto previous_range = std::pair(std::begin(policy->conditions), std::begin(policy->conditions) + type);
|
||||
const auto * previous_it = std::find(previous_range.first, previous_range.second, condition);
|
||||
auto previous_range = std::pair(std::begin(policy->filters), std::begin(policy->filters) + filter_type_i);
|
||||
const auto * previous_it = std::find(previous_range.first, previous_range.second, filter);
|
||||
if (previous_it != previous_range.second)
|
||||
{
|
||||
/// The condition is already parsed before.
|
||||
parsed_conditions[type] = parsed_conditions[previous_it - previous_range.first];
|
||||
/// The filter is already parsed before.
|
||||
parsed_filters[filter_type_i] = parsed_filters[previous_it - previous_range.first];
|
||||
continue;
|
||||
}
|
||||
|
||||
/// Try to parse the condition.
|
||||
/// Try to parse the filter.
|
||||
try
|
||||
{
|
||||
ParserExpression parser;
|
||||
parsed_conditions[type] = parseQuery(parser, condition, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
|
||||
parsed_filters[filter_type_i] = parseQuery(parser, filter, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH);
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(
|
||||
&Poco::Logger::get("RowPolicy"),
|
||||
String("Could not parse the condition ") + toString(type) + " of row policy "
|
||||
String("Could not parse the condition ") + toString(filter_type) + " of row policy "
|
||||
+ backQuote(policy->getName()));
|
||||
}
|
||||
}
|
||||
@ -119,7 +117,7 @@ std::shared_ptr<const EnabledRowPolicies> RowPolicyCache::getEnabledRowPolicies(
|
||||
|
||||
auto res = std::shared_ptr<EnabledRowPolicies>(new EnabledRowPolicies(params));
|
||||
enabled_row_policies.emplace(std::move(params), res);
|
||||
mixConditionsFor(*res);
|
||||
mixFiltersFor(*res);
|
||||
return res;
|
||||
}
|
||||
|
||||
@ -165,7 +163,7 @@ void RowPolicyCache::rowPolicyAddedOrChanged(const UUID & policy_id, const RowPo
|
||||
|
||||
auto & info = it->second;
|
||||
info.setPolicy(new_policy);
|
||||
mixConditions();
|
||||
mixFilters();
|
||||
}
|
||||
|
||||
|
||||
@ -173,11 +171,11 @@ void RowPolicyCache::rowPolicyRemoved(const UUID & policy_id)
|
||||
{
|
||||
std::lock_guard lock{mutex};
|
||||
all_policies.erase(policy_id);
|
||||
mixConditions();
|
||||
mixFilters();
|
||||
}
|
||||
|
||||
|
||||
void RowPolicyCache::mixConditions()
|
||||
void RowPolicyCache::mixFilters()
|
||||
{
|
||||
/// `mutex` is already locked.
|
||||
for (auto i = enabled_row_policies.begin(), e = enabled_row_policies.end(); i != e;)
|
||||
@ -187,58 +185,59 @@ void RowPolicyCache::mixConditions()
|
||||
i = enabled_row_policies.erase(i);
|
||||
else
|
||||
{
|
||||
mixConditionsFor(*elem);
|
||||
mixFiltersFor(*elem);
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void RowPolicyCache::mixConditionsFor(EnabledRowPolicies & enabled)
|
||||
void RowPolicyCache::mixFiltersFor(EnabledRowPolicies & enabled)
|
||||
{
|
||||
/// `mutex` is already locked.
|
||||
|
||||
using MapOfMixedConditions = EnabledRowPolicies::MapOfMixedConditions;
|
||||
using MixedConditionKey = EnabledRowPolicies::MixedConditionKey;
|
||||
using MixedFiltersMap = EnabledRowPolicies::MixedFiltersMap;
|
||||
using MixedFiltersKey = EnabledRowPolicies::MixedFiltersKey;
|
||||
using Hash = EnabledRowPolicies::Hash;
|
||||
|
||||
struct MixerWithNames
|
||||
{
|
||||
ConditionsMixer mixer;
|
||||
FiltersMixer mixer;
|
||||
std::shared_ptr<const std::pair<String, String>> database_and_table_name;
|
||||
};
|
||||
|
||||
std::unordered_map<MixedConditionKey, MixerWithNames, Hash> map_of_mixers;
|
||||
std::unordered_map<MixedFiltersKey, MixerWithNames, Hash> mixers;
|
||||
|
||||
for (const auto & [policy_id, info] : all_policies)
|
||||
{
|
||||
const auto & policy = *info.policy;
|
||||
bool match = info.roles->match(enabled.params.user_id, enabled.params.enabled_roles);
|
||||
MixedConditionKey key;
|
||||
MixedFiltersKey key;
|
||||
key.database = info.database_and_table_name->first;
|
||||
key.table_name = info.database_and_table_name->second;
|
||||
for (auto type : collections::range(0, MAX_CONDITION_TYPE))
|
||||
for (auto filter_type : collections::range(0, RowPolicyFilterType::MAX))
|
||||
{
|
||||
if (info.parsed_conditions[type])
|
||||
auto filter_type_i = static_cast<size_t>(filter_type);
|
||||
if (info.parsed_filters[filter_type_i])
|
||||
{
|
||||
key.condition_type = type;
|
||||
auto & mixer = map_of_mixers[key];
|
||||
key.filter_type = filter_type;
|
||||
auto & mixer = mixers[key];
|
||||
mixer.database_and_table_name = info.database_and_table_name;
|
||||
if (match)
|
||||
mixer.mixer.add(info.parsed_conditions[type], policy.isRestrictive());
|
||||
mixer.mixer.add(info.parsed_filters[filter_type_i], policy.isRestrictive());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto map_of_mixed_conditions = boost::make_shared<MapOfMixedConditions>();
|
||||
for (auto & [key, mixer] : map_of_mixers)
|
||||
auto mixed_filters = boost::make_shared<MixedFiltersMap>();
|
||||
for (auto & [key, mixer] : mixers)
|
||||
{
|
||||
auto & mixed_condition = (*map_of_mixed_conditions)[key];
|
||||
mixed_condition.database_and_table_name = mixer.database_and_table_name;
|
||||
mixed_condition.ast = std::move(mixer.mixer).getResult();
|
||||
auto & mixed_filter = (*mixed_filters)[key];
|
||||
mixed_filter.database_and_table_name = mixer.database_and_table_name;
|
||||
mixed_filter.ast = std::move(mixer.mixer).getResult();
|
||||
}
|
||||
|
||||
enabled.map_of_mixed_conditions.store(map_of_mixed_conditions);
|
||||
enabled.mixed_filters.store(mixed_filters);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -10,6 +10,9 @@
|
||||
namespace DB
|
||||
{
|
||||
class AccessControl;
|
||||
struct RolesOrUsersSet;
|
||||
struct RowPolicy;
|
||||
using RowPolicyPtr = std::shared_ptr<const RowPolicy>;
|
||||
|
||||
/// Stores read and parsed row policies.
|
||||
class RowPolicyCache
|
||||
@ -29,14 +32,14 @@ private:
|
||||
RowPolicyPtr policy;
|
||||
const RolesOrUsersSet * roles = nullptr;
|
||||
std::shared_ptr<const std::pair<String, String>> database_and_table_name;
|
||||
ASTPtr parsed_conditions[RowPolicy::MAX_CONDITION_TYPE];
|
||||
ASTPtr parsed_filters[static_cast<size_t>(RowPolicyFilterType::MAX)];
|
||||
};
|
||||
|
||||
void ensureAllRowPoliciesRead();
|
||||
void rowPolicyAddedOrChanged(const UUID & policy_id, const RowPolicyPtr & new_policy);
|
||||
void rowPolicyRemoved(const UUID & policy_id);
|
||||
void mixConditions();
|
||||
void mixConditionsFor(EnabledRowPolicies & enabled);
|
||||
void mixFilters();
|
||||
void mixFiltersFor(EnabledRowPolicies & enabled);
|
||||
|
||||
const AccessControl & access_control;
|
||||
std::unordered_map<UUID, PolicyInfo> all_policies;
|
||||
|
@ -18,8 +18,8 @@ struct SettingsProfile : public IAccessEntity
|
||||
|
||||
bool equal(const IAccessEntity & other) const override;
|
||||
std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<SettingsProfile>(); }
|
||||
static constexpr const Type TYPE = Type::SETTINGS_PROFILE;
|
||||
Type getType() const override { return TYPE; }
|
||||
static constexpr const auto TYPE = AccessEntityType::SETTINGS_PROFILE;
|
||||
AccessEntityType getType() const override { return TYPE; }
|
||||
};
|
||||
|
||||
using SettingsProfilePtr = std::shared_ptr<const SettingsProfile>;
|
||||
|
@ -26,8 +26,8 @@ struct User : public IAccessEntity
|
||||
|
||||
bool equal(const IAccessEntity & other) const override;
|
||||
std::shared_ptr<IAccessEntity> clone() const override { return cloneImpl<User>(); }
|
||||
static constexpr const Type TYPE = Type::USER;
|
||||
Type getType() const override { return TYPE; }
|
||||
static constexpr const auto TYPE = AccessEntityType::USER;
|
||||
AccessEntityType getType() const override { return TYPE; }
|
||||
};
|
||||
|
||||
using UserPtr = std::shared_ptr<const User>;
|
||||
|
@ -33,15 +33,12 @@ namespace ErrorCodes
|
||||
|
||||
namespace
|
||||
{
|
||||
using EntityType = IAccessStorage::EntityType;
|
||||
using EntityTypeInfo = IAccessStorage::EntityTypeInfo;
|
||||
|
||||
UUID generateID(EntityType type, const String & name)
|
||||
UUID generateID(AccessEntityType type, const String & name)
|
||||
{
|
||||
Poco::MD5Engine md5;
|
||||
md5.update(name);
|
||||
char type_storage_chars[] = " USRSXML";
|
||||
type_storage_chars[0] = EntityTypeInfo::get(type).unique_char;
|
||||
type_storage_chars[0] = AccessEntityTypeInfo::get(type).unique_char;
|
||||
md5.update(type_storage_chars, strlen(type_storage_chars));
|
||||
UUID result;
|
||||
memcpy(&result, md5.digest().data(), md5.digestLength());
|
||||
@ -114,7 +111,7 @@ namespace
|
||||
{
|
||||
auto profile_name = config.getString(profile_name_config);
|
||||
SettingsProfileElement profile_element;
|
||||
profile_element.parent_profile = generateID(EntityType::SETTINGS_PROFILE, profile_name);
|
||||
profile_element.parent_profile = generateID(AccessEntityType::SETTINGS_PROFILE, profile_name);
|
||||
user->settings.push_back(std::move(profile_element));
|
||||
}
|
||||
|
||||
@ -223,16 +220,15 @@ namespace
|
||||
auto quota = std::make_shared<Quota>();
|
||||
quota->setName(quota_name);
|
||||
|
||||
using KeyType = Quota::KeyType;
|
||||
String quota_config = "quotas." + quota_name;
|
||||
if (config.has(quota_config + ".keyed_by_ip"))
|
||||
quota->key_type = KeyType::IP_ADDRESS;
|
||||
quota->key_type = QuotaKeyType::IP_ADDRESS;
|
||||
else if (config.has(quota_config + ".keyed_by_forwarded_ip"))
|
||||
quota->key_type = KeyType::FORWARDED_IP_ADDRESS;
|
||||
quota->key_type = QuotaKeyType::FORWARDED_IP_ADDRESS;
|
||||
else if (config.has(quota_config + ".keyed"))
|
||||
quota->key_type = KeyType::CLIENT_KEY_OR_USER_NAME;
|
||||
quota->key_type = QuotaKeyType::CLIENT_KEY_OR_USER_NAME;
|
||||
else
|
||||
quota->key_type = KeyType::USER_NAME;
|
||||
quota->key_type = QuotaKeyType::USER_NAME;
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys interval_keys;
|
||||
config.keys(quota_config, interval_keys);
|
||||
@ -252,12 +248,12 @@ namespace
|
||||
limits.duration = duration;
|
||||
limits.randomize_interval = config.getBool(interval_config + ".randomize", false);
|
||||
|
||||
for (auto resource_type : collections::range(Quota::MAX_RESOURCE_TYPE))
|
||||
for (auto quota_type : collections::range(QuotaType::MAX))
|
||||
{
|
||||
const auto & type_info = Quota::ResourceTypeInfo::get(resource_type);
|
||||
const auto & type_info = QuotaTypeInfo::get(quota_type);
|
||||
auto value = config.getString(interval_config + "." + type_info.name, "0");
|
||||
if (value != "0")
|
||||
limits.max[resource_type] = type_info.amountFromString(value);
|
||||
limits.max[static_cast<size_t>(quota_type)] = type_info.stringToValue(value);
|
||||
}
|
||||
}
|
||||
|
||||
@ -274,7 +270,7 @@ namespace
|
||||
for (const auto & user_name : user_names)
|
||||
{
|
||||
if (config.has("users." + user_name + ".quota"))
|
||||
quota_to_user_ids[config.getString("users." + user_name + ".quota")].push_back(generateID(EntityType::USER, user_name));
|
||||
quota_to_user_ids[config.getString("users." + user_name + ".quota")].push_back(generateID(AccessEntityType::USER, user_name));
|
||||
}
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys quota_names;
|
||||
@ -351,9 +347,9 @@ namespace
|
||||
String filter = (it != user_to_filters.end()) ? it->second : "1";
|
||||
|
||||
auto policy = std::make_shared<RowPolicy>();
|
||||
policy->setNameParts(user_name, database, table_name);
|
||||
policy->conditions[RowPolicy::SELECT_FILTER] = filter;
|
||||
policy->to_roles.add(generateID(EntityType::USER, user_name));
|
||||
policy->setFullName(user_name, database, table_name);
|
||||
policy->filters[static_cast<size_t>(RowPolicyFilterType::SELECT_FILTER)] = filter;
|
||||
policy->to_roles.add(generateID(AccessEntityType::USER, user_name));
|
||||
policies.push_back(policy);
|
||||
}
|
||||
}
|
||||
@ -415,7 +411,7 @@ namespace
|
||||
{
|
||||
String parent_profile_name = config.getString(profile_config + "." + key);
|
||||
SettingsProfileElement profile_element;
|
||||
profile_element.parent_profile = generateID(EntityType::SETTINGS_PROFILE, parent_profile_name);
|
||||
profile_element.parent_profile = generateID(AccessEntityType::SETTINGS_PROFILE, parent_profile_name);
|
||||
profile->elements.emplace_back(std::move(profile_element));
|
||||
continue;
|
||||
}
|
||||
@ -552,13 +548,13 @@ void UsersConfigAccessStorage::startPeriodicReloading()
|
||||
config_reloader->start();
|
||||
}
|
||||
|
||||
std::optional<UUID> UsersConfigAccessStorage::findImpl(EntityType type, const String & name) const
|
||||
std::optional<UUID> UsersConfigAccessStorage::findImpl(AccessEntityType type, const String & name) const
|
||||
{
|
||||
return memory_storage.find(type, name);
|
||||
}
|
||||
|
||||
|
||||
std::vector<UUID> UsersConfigAccessStorage::findAllImpl(EntityType type) const
|
||||
std::vector<UUID> UsersConfigAccessStorage::findAllImpl(AccessEntityType type) const
|
||||
{
|
||||
return memory_storage.findAll(type);
|
||||
}
|
||||
@ -608,7 +604,7 @@ scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(const UUID & id, c
|
||||
}
|
||||
|
||||
|
||||
scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
|
||||
scope_guard UsersConfigAccessStorage::subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const
|
||||
{
|
||||
return memory_storage.subscribeForChanges(type, handler);
|
||||
}
|
||||
@ -620,7 +616,7 @@ bool UsersConfigAccessStorage::hasSubscriptionImpl(const UUID & id) const
|
||||
}
|
||||
|
||||
|
||||
bool UsersConfigAccessStorage::hasSubscriptionImpl(EntityType type) const
|
||||
bool UsersConfigAccessStorage::hasSubscriptionImpl(AccessEntityType type) const
|
||||
{
|
||||
return memory_storage.hasSubscription(type);
|
||||
}
|
||||
|
@ -43,8 +43,8 @@ public:
|
||||
private:
|
||||
void parseFromConfig(const Poco::Util::AbstractConfiguration & config);
|
||||
|
||||
std::optional<UUID> findImpl(EntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(EntityType type) const override;
|
||||
std::optional<UUID> findImpl(AccessEntityType type, const String & name) const override;
|
||||
std::vector<UUID> findAllImpl(AccessEntityType type) const override;
|
||||
bool existsImpl(const UUID & id) const override;
|
||||
AccessEntityPtr readImpl(const UUID & id) const override;
|
||||
String readNameImpl(const UUID & id) const override;
|
||||
@ -53,9 +53,9 @@ private:
|
||||
void removeImpl(const UUID & id) override;
|
||||
void updateImpl(const UUID & id, const UpdateFunc & update_func) override;
|
||||
scope_guard subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const override;
|
||||
scope_guard subscribeForChangesImpl(AccessEntityType type, const OnChangedHandler & handler) const override;
|
||||
bool hasSubscriptionImpl(const UUID & id) const override;
|
||||
bool hasSubscriptionImpl(EntityType type) const override;
|
||||
bool hasSubscriptionImpl(AccessEntityType type) const override;
|
||||
|
||||
MemoryAccessStorage memory_storage;
|
||||
CheckSettingNameFunction check_setting_name_function;
|
||||
|
@ -312,11 +312,11 @@ namespace
|
||||
String getDataPathInBackup(const IAST & create_query)
|
||||
{
|
||||
const auto & create = create_query.as<const ASTCreateQuery &>();
|
||||
if (create.table.empty())
|
||||
if (!create.table)
|
||||
return {};
|
||||
if (create.temporary)
|
||||
return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table});
|
||||
return getDataPathInBackup({create.database, create.table});
|
||||
return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()});
|
||||
return getDataPathInBackup({create.getDatabase(), create.getTable()});
|
||||
}
|
||||
|
||||
String getMetadataPathInBackup(const DatabaseAndTableName & table_name)
|
||||
@ -336,11 +336,11 @@ namespace
|
||||
String getMetadataPathInBackup(const IAST & create_query)
|
||||
{
|
||||
const auto & create = create_query.as<const ASTCreateQuery &>();
|
||||
if (create.table.empty())
|
||||
return getMetadataPathInBackup(create.database);
|
||||
if (!create.table)
|
||||
return getMetadataPathInBackup(create.getDatabase());
|
||||
if (create.temporary)
|
||||
return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table});
|
||||
return getMetadataPathInBackup({create.database, create.table});
|
||||
return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()});
|
||||
return getMetadataPathInBackup({create.getDatabase(), create.getTable()});
|
||||
}
|
||||
|
||||
void backupCreateQuery(const IAST & create_query, BackupEntries & backup_entries)
|
||||
@ -419,7 +419,7 @@ namespace
|
||||
|
||||
/// We create and execute `create` query for the database name.
|
||||
auto create_query = std::make_shared<ASTCreateQuery>();
|
||||
create_query->database = database_name;
|
||||
create_query->setDatabase(database_name);
|
||||
create_query->if_not_exists = true;
|
||||
InterpreterCreateQuery create_interpreter{create_query, context};
|
||||
create_interpreter.execute();
|
||||
@ -460,7 +460,7 @@ namespace
|
||||
|
||||
restore_tasks.emplace_back([table_name, new_create_query, partitions, context, backup]() -> RestoreDataTasks
|
||||
{
|
||||
DatabaseAndTableName new_table_name{new_create_query->database, new_create_query->table};
|
||||
DatabaseAndTableName new_table_name{new_create_query->getDatabase(), new_create_query->getTable()};
|
||||
if (new_create_query->temporary)
|
||||
new_table_name.first = DatabaseCatalog::TEMPORARY_DATABASE;
|
||||
|
||||
@ -536,7 +536,7 @@ namespace
|
||||
|
||||
restore_tasks.emplace_back([database_name, new_create_query, except_list, context, backup, renaming_config]() -> RestoreDataTasks
|
||||
{
|
||||
const String & new_database_name = new_create_query->database;
|
||||
const String & new_database_name = new_create_query->getDatabase();
|
||||
context->checkAccess(AccessType::SHOW_TABLES, new_database_name);
|
||||
|
||||
if (!DatabaseCatalog::instance().isDatabaseExist(new_database_name))
|
||||
|
@ -48,21 +48,23 @@ namespace
|
||||
{
|
||||
if (create.temporary)
|
||||
{
|
||||
if (create.table.empty())
|
||||
if (!create.table)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Table name specified in the CREATE TEMPORARY TABLE query must not be empty");
|
||||
create.table = data.renaming_config->getNewTemporaryTableName(create.table);
|
||||
create.setTable(data.renaming_config->getNewTemporaryTableName(create.getTable()));
|
||||
}
|
||||
else if (create.table.empty())
|
||||
else if (!create.table)
|
||||
{
|
||||
if (create.database.empty())
|
||||
if (!create.database)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty");
|
||||
create.database = data.renaming_config->getNewDatabaseName(create.database);
|
||||
create.setDatabase(data.renaming_config->getNewDatabaseName(create.getDatabase()));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (create.database.empty())
|
||||
if (!create.database)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty");
|
||||
std::tie(create.database, create.table) = data.renaming_config->getNewTableName({create.database, create.table});
|
||||
auto table_and_database_name = data.renaming_config->getNewTableName({create.getDatabase(), create.getTable()});
|
||||
create.setDatabase(table_and_database_name.first);
|
||||
create.setTable(table_and_database_name.second);
|
||||
}
|
||||
|
||||
create.uuid = UUIDHelpers::Nil;
|
||||
|
@ -56,6 +56,8 @@
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/CompressionMethod.h>
|
||||
#include <Client/InternalTextLogs.h>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
using namespace std::literals;
|
||||
@ -405,7 +407,7 @@ void ClientBase::initBlockOutputStream(const Block & block, ASTPtr parsed_query)
|
||||
output_format = global_context->getOutputFormat(
|
||||
current_format, out_file_buf ? *out_file_buf : *out_buf, block);
|
||||
|
||||
output_format->doWritePrefix();
|
||||
output_format->setAutoFlush();
|
||||
}
|
||||
}
|
||||
|
||||
@ -490,7 +492,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
|
||||
ReplaceQueryParameterVisitor visitor(query_parameters);
|
||||
visitor.visit(parsed_query);
|
||||
|
||||
/// Get new query after substitutions. Note that it cannot be done for INSERT query with embedded data.
|
||||
/// Get new query after substitutions.
|
||||
query = serializeAST(*parsed_query);
|
||||
}
|
||||
|
||||
@ -685,7 +687,7 @@ void ClientBase::onEndOfStream()
|
||||
progress_indication.clearProgressOutput();
|
||||
|
||||
if (output_format)
|
||||
output_format->doWriteSuffix();
|
||||
output_format->finalize();
|
||||
|
||||
resetOutput();
|
||||
|
||||
@ -824,6 +826,17 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de
|
||||
|
||||
void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query)
|
||||
{
|
||||
auto query = query_to_execute;
|
||||
if (!query_parameters.empty())
|
||||
{
|
||||
/// Replace ASTQueryParameter with ASTLiteral for prepared statements.
|
||||
ReplaceQueryParameterVisitor visitor(query_parameters);
|
||||
visitor.visit(parsed_query);
|
||||
|
||||
/// Get new query after substitutions.
|
||||
query = serializeAST(*parsed_query);
|
||||
}
|
||||
|
||||
/// Process the query that requires transferring data blocks to the server.
|
||||
const auto parsed_insert_query = parsed_query->as<ASTInsertQuery &>();
|
||||
if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && std_in.eof())))
|
||||
@ -831,7 +844,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
|
||||
|
||||
connection->sendQuery(
|
||||
connection_parameters.timeouts,
|
||||
query_to_execute,
|
||||
query,
|
||||
global_context->getCurrentQueryId(),
|
||||
query_processing_stage,
|
||||
&global_context->getSettingsRef(),
|
||||
@ -884,8 +897,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des
|
||||
/// Get name of this file (path to file)
|
||||
const auto & in_file_node = parsed_insert_query->infile->as<ASTLiteral &>();
|
||||
const auto in_file = in_file_node.value.safeGet<std::string>();
|
||||
/// Get name of table
|
||||
const auto table_name = parsed_insert_query->table_id.getTableName();
|
||||
|
||||
std::string compression_method;
|
||||
/// Compression method can be specified in query
|
||||
if (parsed_insert_query->compression)
|
||||
@ -1321,6 +1333,12 @@ bool ClientBase::processQueryText(const String & text)
|
||||
}
|
||||
|
||||
|
||||
String ClientBase::prompt() const
|
||||
{
|
||||
return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default"));
|
||||
}
|
||||
|
||||
|
||||
void ClientBase::runInteractive()
|
||||
{
|
||||
if (config().has("query_id"))
|
||||
@ -1703,6 +1721,7 @@ void ClientBase::init(int argc, char ** argv)
|
||||
("profile-events-delay-ms", po::value<UInt64>()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)")
|
||||
|
||||
("interactive", "Process queries-file or --query query and start interactive mode")
|
||||
("pager", po::value<std::string>(), "Pipe all output into this command (less or similar)")
|
||||
;
|
||||
|
||||
addOptions(options_description);
|
||||
@ -1774,6 +1793,8 @@ void ClientBase::init(int argc, char ** argv)
|
||||
config().setBool("verbose", true);
|
||||
if (options.count("interactive"))
|
||||
config().setBool("interactive", true);
|
||||
if (options.count("pager"))
|
||||
config().setString("pager", options["pager"].as<std::string>());
|
||||
|
||||
if (options.count("log-level"))
|
||||
Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
|
||||
|
@ -128,10 +128,7 @@ private:
|
||||
void initBlockOutputStream(const Block & block, ASTPtr parsed_query);
|
||||
void initLogsOutputStream();
|
||||
|
||||
inline String prompt() const
|
||||
{
|
||||
return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default"));
|
||||
}
|
||||
String prompt() const;
|
||||
|
||||
void resetOutput();
|
||||
void outputQueryInfo(bool echo_query_);
|
||||
|
@ -12,6 +12,8 @@
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/setThreadName.h>
|
||||
|
||||
#define THREAD_NAME_SIZE 16
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -23,13 +25,13 @@ namespace ErrorCodes
|
||||
|
||||
|
||||
/// Cache thread_name to avoid prctl(PR_GET_NAME) for query_log/text_log
|
||||
static thread_local std::string thread_name;
|
||||
static thread_local char thread_name[THREAD_NAME_SIZE]{};
|
||||
|
||||
|
||||
void setThreadName(const char * name)
|
||||
{
|
||||
#ifndef NDEBUG
|
||||
if (strlen(name) > 15)
|
||||
if (strlen(name) > THREAD_NAME_SIZE - 1)
|
||||
throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR);
|
||||
#endif
|
||||
|
||||
@ -45,28 +47,25 @@ void setThreadName(const char * name)
|
||||
#endif
|
||||
DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR);
|
||||
|
||||
thread_name = name;
|
||||
memcpy(thread_name, name, 1 + strlen(name));
|
||||
}
|
||||
|
||||
const std::string & getThreadName()
|
||||
const char * getThreadName()
|
||||
{
|
||||
if (!thread_name.empty())
|
||||
if (thread_name[0])
|
||||
return thread_name;
|
||||
|
||||
thread_name.resize(16);
|
||||
|
||||
#if defined(__APPLE__) || defined(OS_SUNOS)
|
||||
if (pthread_getname_np(pthread_self(), thread_name.data(), thread_name.size()))
|
||||
if (pthread_getname_np(pthread_self(), thread_name, THREAD_NAME_SIZE))
|
||||
throw DB::Exception("Cannot get thread name with pthread_getname_np()", DB::ErrorCodes::PTHREAD_ERROR);
|
||||
#elif defined(__FreeBSD__)
|
||||
// TODO: make test. freebsd will have this function soon https://freshbsd.org/commit/freebsd/r337983
|
||||
// if (pthread_get_name_np(pthread_self(), thread_name.data(), thread_name.size()))
|
||||
// if (pthread_get_name_np(pthread_self(), thread_name, THREAD_NAME_SIZE))
|
||||
// throw DB::Exception("Cannot get thread name with pthread_get_name_np()", DB::ErrorCodes::PTHREAD_ERROR);
|
||||
#else
|
||||
if (0 != prctl(PR_GET_NAME, thread_name.data(), 0, 0, 0))
|
||||
if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0))
|
||||
DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR);
|
||||
#endif
|
||||
|
||||
thread_name.resize(std::strlen(thread_name.data()));
|
||||
return thread_name;
|
||||
}
|
||||
|
@ -7,4 +7,4 @@
|
||||
*/
|
||||
void setThreadName(const char * name);
|
||||
|
||||
const std::string & getThreadName();
|
||||
const char * getThreadName();
|
||||
|
@ -46,7 +46,7 @@
|
||||
/// The boundary on which the blocks for asynchronous file operations should be aligned.
|
||||
#define DEFAULT_AIO_FILE_BLOCK_SIZE 4096
|
||||
|
||||
#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800
|
||||
#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 180
|
||||
#define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1
|
||||
/// Maximum number of http-connections between two endpoints
|
||||
/// the number is unmotivated
|
||||
|
@ -20,15 +20,6 @@

namespace DB
{
ASTPtr dataTypeConvertToQuery(const DataTypePtr & data_type)
{
WhichDataType which(data_type);

if (!which.isNullable())
return std::make_shared<ASTIdentifier>(data_type->getName());

return makeASTFunction("Nullable", dataTypeConvertToQuery(typeid_cast<const DataTypeNullable *>(data_type.get())->getNestedType()));
}

DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
const std::string & mysql_data_type,
@ -9,11 +9,6 @@ namespace DB
{
enum class MySQLDataTypesSupport;

/// Convert data type to query. for example
/// DataTypeUInt8 -> ASTIdentifier(UInt8)
/// DataTypeNullable(DataTypeUInt8) -> ASTFunction(ASTIdentifier(UInt8))
ASTPtr dataTypeConvertToQuery(const DataTypePtr & data_type);

/// Convert MySQL type to ClickHouse data type.
DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support, const std::string & mysql_data_type, bool is_nullable, bool is_unsigned, size_t length, size_t precision, size_t scale);
@ -295,9 +295,9 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
try
{
std::unique_lock lock{mutex};
if (query.database != database_name)
if (query.getDatabase() != database_name)
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`",
database_name, query.database);
database_name, query.getDatabase());
/// Do some checks before renaming file from .tmp to .sql
not_in_use = cleanupDetachedTables();
assertDetachedTableNotInUse(query.uuid);
@ -314,8 +314,8 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora

/// It throws if `table_metadata_path` already exists (it's possible if table was detached)
renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of)
attachTableUnlocked(query.table, table, lock); /// Should never throw
table_name_to_path.emplace(query.table, table_data_path);
attachTableUnlocked(query.getTable(), table, lock); /// Should never throw
table_name_to_path.emplace(query.getTable(), table_data_path);
}
catch (...)
{
@ -325,7 +325,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora
throw;
}
if (table->storesDataOnDisk())
tryCreateSymlink(query.table, table_data_path);
tryCreateSymlink(query.getTable(), table_data_path);
}

void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & table_metadata_tmp_path, const String & table_metadata_path,
@ -103,7 +103,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng
DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context)
{
auto * engine_define = create.storage;
const String & database_name = create.database;
const String & database_name = create.getDatabase();
const String & engine_name = engine_define->engine->name;
const UUID & uuid = create.uuid;
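Aside, not part of the commit: the recurring query.database / query.table to getDatabase()/getTable() edits in these database hunks all follow one accessor pattern, where the name is no longer a plain String member and presence is tested with reset()/boolean checks. A rough sketch of that shape, using std::optional instead of the real AST children:

    #include <optional>
    #include <string>
    #include <utility>

    struct CreateQuerySketch // illustrative only, not the actual ASTCreateQuery
    {
        // Stored as optional: "no database given" is distinct from an empty name.
        std::optional<std::string> database;
        std::optional<std::string> table;

        std::string getDatabase() const { return database.value_or(""); }
        std::string getTable() const { return table.value_or(""); }
        void setDatabase(std::string name) { database = std::move(name); }
        void setTable(std::string name) { table = std::move(name); }
    };

    // Usage mirroring the diff:
    //   if (query.getDatabase() != database_name) ...   instead of   query.database != database_name
    //   query.database.reset();                         instead of   query.database.clear();
    //   if (create.database) ...                        instead of   !create.database.empty()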
@ -75,7 +75,7 @@ void DatabaseMemory::dropTable(
ASTPtr DatabaseMemory::getCreateDatabaseQuery() const
{
auto create_query = std::make_shared<ASTCreateQuery>();
create_query->database = getDatabaseName();
create_query->setDatabase(getDatabaseName());
create_query->set(create_query->storage, std::make_shared<ASTStorage>());
create_query->storage->set(create_query->storage->engine, makeASTFunction(getEngineName()));

@ -42,7 +42,7 @@ public:
/// TODO May be it's better to use DiskMemory for such tables.
/// To save data on disk it's possible to explicitly CREATE DATABASE db ENGINE=Ordinary in clickhouse-local.
String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }

UUID tryGetTableUUID(const String & table_name) const override;
@ -51,7 +51,7 @@ std::pair<String, StoragePtr> createTableFromAST(
bool force_restore)
{
ast_create_query.attach = true;
ast_create_query.database = database_name;
ast_create_query.setDatabase(database_name);

if (ast_create_query.as_table_function)
{
@ -60,9 +60,9 @@ std::pair<String, StoragePtr> createTableFromAST(
ColumnsDescription columns;
if (ast_create_query.columns_list && ast_create_query.columns_list->columns)
columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true);
StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.table, std::move(columns));
StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.getTable(), std::move(columns));
storage->renameInMemory(ast_create_query);
return {ast_create_query.table, storage};
return {ast_create_query.getTable(), storage};
}

ColumnsDescription columns;
@ -82,7 +82,7 @@ std::pair<String, StoragePtr> createTableFromAST(

return
{
ast_create_query.table,
ast_create_query.getTable(),
StorageFactory::instance().get(
ast_create_query,
table_data_path_relative,
@ -112,7 +112,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)

/// We remove everything that is not needed for ATTACH from the query.
assert(!create->temporary);
create->database.clear();
create->database.reset();
create->as_database.clear();
create->as_table.clear();
create->if_not_exists = false;
@ -129,7 +129,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query)
create->out_file = nullptr;

if (create->uuid != UUIDHelpers::Nil)
create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER;
create->setTable(TABLE_WITH_UUID_NAME_PLACEHOLDER);

WriteBufferFromOwnString statement_buf;
formatAST(*create, statement_buf, false);
@ -161,7 +161,7 @@ void DatabaseOnDisk::createTable(
{
const auto & settings = local_context->getSettingsRef();
const auto & create = query->as<ASTCreateQuery &>();
assert(table_name == create.table);
assert(table_name == create.getTable());

/// Create a file with metadata if necessary - if the query is not ATTACH.
/// Write the query of `ATTACH table` to it.
@ -251,7 +251,7 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora
try
{
/// Add a table to the map of known tables.
attachTable(query.table, table, getTableDataPath(query));
attachTable(query.getTable(), table, getTableDataPath(query));

/// If it was ATTACH query and file with table metadata already exist
/// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one.
@ -382,8 +382,8 @@ void DatabaseOnDisk::renameTable(
table_metadata_path = getObjectMetadataPath(table_name);
attach_query = parseQueryFromMetadata(log, local_context, table_metadata_path);
auto & create = attach_query->as<ASTCreateQuery &>();
create.database = to_database.getDatabaseName();
create.table = to_table_name;
create.setDatabase(to_database.getDatabaseName());
create.setTable(to_table_name);
if (from_ordinary_to_atomic)
create.uuid = UUIDHelpers::generateV4();
if (from_atomic_to_ordinary)
@ -430,7 +430,11 @@ void DatabaseOnDisk::renameTable(
ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, ContextPtr, bool throw_on_error) const
{
ASTPtr ast;
bool has_table = tryGetTable(table_name, getContext()) != nullptr;
StoragePtr storage = tryGetTable(table_name, getContext());
bool has_table = storage != nullptr;
bool is_system_storage = false;
if (has_table)
is_system_storage = storage->isSystemStorage();
auto table_metadata_path = getObjectMetadataPath(table_name);
try
{
@ -441,6 +445,8 @@ ASTPtr DatabaseOnDisk::getCreateTableQueryImpl(const String & table_name, Contex
if (!has_table && e.code() == ErrorCodes::FILE_DOESNT_EXIST && throw_on_error)
throw Exception{"Table " + backQuote(table_name) + " doesn't exist",
ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY};
else if (is_system_storage)
ast = getCreateQueryFromStorage(table_name, storage, throw_on_error);
else if (throw_on_error)
throw;
}
@ -458,7 +464,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const
ast = parseQueryFromMetadata(log, getContext(), database_metadata_path, true);
auto & ast_create_query = ast->as<ASTCreateQuery &>();
ast_create_query.attach = false;
ast_create_query.database = database_name;
ast_create_query.setDatabase(database_name);
}
if (!ast)
{
@ -642,18 +648,18 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata(
return nullptr;

auto & create = ast->as<ASTCreateQuery &>();
if (!create.table.empty() && create.uuid != UUIDHelpers::Nil)
if (create.table && create.uuid != UUIDHelpers::Nil)
{
String table_name = unescapeForFileName(fs::path(metadata_file_path).stem());

if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
if (create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger)
LOG_WARNING(
logger,
"File {} contains both UUID and table name. Will use name `{}` instead of `{}`",
metadata_file_path,
table_name,
create.table);
create.table = table_name;
create.getTable());
create.setTable(table_name);
}

return ast;
@ -667,12 +673,38 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const String & database_metada
{
auto & ast_create_query = ast->as<ASTCreateQuery &>();
ast_create_query.attach = false;
ast_create_query.database = getDatabaseName();
ast_create_query.setDatabase(getDatabaseName());
}

return ast;
}

ASTPtr DatabaseOnDisk::getCreateQueryFromStorage(const String & table_name, const StoragePtr & storage, bool throw_on_error) const
{
auto metadata_ptr = storage->getInMemoryMetadataPtr();
if (metadata_ptr == nullptr)
{
if (throw_on_error)
throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "Cannot get metadata of {}.{}", backQuote(getDatabaseName()), backQuote(table_name));
else
return nullptr;
}

/// setup create table query storage info.
auto ast_engine = std::make_shared<ASTFunction>();
ast_engine->name = storage->getName();
auto ast_storage = std::make_shared<ASTStorage>();
ast_storage->set(ast_storage->engine, ast_engine);

auto create_table_query = DB::getCreateQueryFromStorage(storage, ast_storage, false,
getContext()->getSettingsRef().max_parser_depth, throw_on_error);

create_table_query->set(create_table_query->as<ASTCreateQuery>()->comment,
std::make_shared<ASTLiteral>("SYSTEM TABLE is built on the fly."));

return create_table_query;
}

void DatabaseOnDisk::modifySettingsMetadata(const SettingsChanges & settings_changes, ContextPtr query_context)
{
std::lock_guard lock(modify_settings_mutex);
@ -63,7 +63,7 @@ public:

String getDataPath() const override { return data_path; }
String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; }
String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); }
String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); }
String getMetadataPath() const override { return metadata_path; }

static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false);
@ -89,6 +89,7 @@ protected:
bool throw_on_error) const override;

ASTPtr getCreateQueryFromMetadata(const String & metadata_path, bool throw_on_error) const;
ASTPtr getCreateQueryFromStorage(const String & table_name, const StoragePtr & storage, bool throw_on_error) const;

virtual void commitCreateTable(const ASTCreateQuery & query, const StoragePtr & table,
const String & table_metadata_tmp_path, const String & table_metadata_path, ContextPtr query_context);
@ -55,7 +55,7 @@ namespace
catch (Exception & e)
{
e.addMessage(
"Cannot attach table " + backQuote(database_name) + "." + backQuote(query.table) + " from metadata file " + metadata_path
"Cannot attach table " + backQuote(database_name) + "." + backQuote(query.getTable()) + " from metadata file " + metadata_path
+ " from query " + serializeAST(query));
throw;
}
@ -168,7 +168,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
if (ast)
{
auto * create_query = ast->as<ASTCreateQuery>();
create_query->database = database_name;
create_query->setDatabase(database_name);

if (fs::exists(full_path.string() + detached_suffix))
{
@ -182,7 +182,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables
}

TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext(), ast);
QualifiedTableName qualified_name{database_name, create_query->table};
QualifiedTableName qualified_name{database_name, create_query->getTable()};

std::lock_guard lock{metadata.mutex};
metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast};
@ -349,9 +349,9 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
/// Replicas will set correct name of current database in query context (database name can be different on replicas)
if (auto * ddl_query = dynamic_cast<ASTQueryWithTableAndOutput *>(query.get()))
{
if (ddl_query->database != getDatabaseName())
if (ddl_query->getDatabase() != getDatabaseName())
throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed");
ddl_query->database.clear();
ddl_query->database.reset();

if (auto * create = query->as<ASTCreateQuery>())
{
@ -391,7 +391,7 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_
/// NOTE: we cannot check here that substituted values will be actually different on shards and replicas.

Macros::MacroExpansionInfo info;
info.table_id = {getDatabaseName(), create->table, create->uuid};
info.table_id = {getDatabaseName(), create->getTable(), create->uuid};
query_context->getMacros()->expand(maybe_path, info);
bool maybe_shard_macros = info.expanded_other;
info.expanded_other = false;
@ -715,13 +715,13 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node
auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth);

auto & create = ast->as<ASTCreateQuery &>();
if (create.uuid == UUIDHelpers::Nil || create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER || !create.database.empty())
if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query);

bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty();

create.database = getDatabaseName();
create.table = unescapeForFileName(node_name);
create.setDatabase(getDatabaseName());
create.setTable(unescapeForFileName(node_name));
create.attach = is_materialized_view_with_inner_table;

return ast;
@ -811,7 +811,7 @@ void DatabaseReplicated::commitCreateTable(const ASTCreateQuery & query, const S
assert(!ddl_worker->isCurrentlyActive() || txn);
if (txn && txn->isInitialQuery())
{
String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.table);
String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.getTable());
String statement = getObjectDefinitionFromCreateQuery(query.clone());
/// zk::multi(...) will throw if `metadata_zk_path` exists
txn->addOp(zkutil::makeCreateRequest(metadata_zk_path, statement, zkutil::CreateMode::Persistent));
@ -20,6 +20,8 @@ namespace ErrorCodes
extern const int UNKNOWN_TABLE;
extern const int UNKNOWN_DATABASE;
extern const int NOT_IMPLEMENTED;
extern const int LOGICAL_ERROR;
extern const int CANNOT_GET_CREATE_TABLE_QUERY;
}

void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemoryMetadata & metadata)
@ -29,7 +31,7 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo
bool has_structure = ast_create_query.columns_list && ast_create_query.columns_list->columns;
if (ast_create_query.as_table_function && !has_structure)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot alter table {} because it was created AS table function"
" and doesn't have structure in metadata", backQuote(ast_create_query.table));
" and doesn't have structure in metadata", backQuote(ast_create_query.getTable()));

assert(has_structure);
ASTPtr new_columns = InterpreterCreateQuery::formatColumns(metadata.columns);
@ -85,6 +87,66 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo
}


ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error)
{
auto table_id = storage->getStorageID();
auto metadata_ptr = storage->getInMemoryMetadataPtr();
if (metadata_ptr == nullptr)
{
if (throw_on_error)
throw Exception(ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY, "Cannot get metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name));
else
return nullptr;
}

auto create_table_query = std::make_shared<ASTCreateQuery>();
create_table_query->attach = false;
create_table_query->setTable(table_id.table_name);
create_table_query->setDatabase(table_id.database_name);
create_table_query->set(create_table_query->storage, ast_storage);

/// setup create table query columns info.
{
auto ast_columns_list = std::make_shared<ASTColumns>();
auto ast_expression_list = std::make_shared<ASTExpressionList>();
NamesAndTypesList columns;
if (only_ordinary)
columns = metadata_ptr->columns.getOrdinary();
else
columns = metadata_ptr->columns.getAll();
for (const auto & column_name_and_type: columns)
{
const auto & ast_column_declaration = std::make_shared<ASTColumnDeclaration>();
ast_column_declaration->name = column_name_and_type.name;
/// parser typename
{
ASTPtr ast_type;
auto type_name = column_name_and_type.type->getName();
const auto * string_end = type_name.c_str() + type_name.length();
Expected expected;
expected.max_parsed_pos = string_end;
Tokens tokens(type_name.c_str(), string_end);
IParser::Pos pos(tokens, max_parser_depth);
ParserDataType parser;
if (!parser.parse(pos, ast_type, expected))
{
if (throw_on_error)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot parser metadata of {}.{}", backQuote(table_id.database_name), backQuote(table_id.table_name));
else
return nullptr;
}
ast_column_declaration->type = ast_type;
}
ast_expression_list->children.emplace_back(ast_column_declaration);
}

ast_columns_list->set(ast_columns_list->columns, ast_expression_list);
create_table_query->set(create_table_query->columns_list, ast_columns_list);
}
return create_table_query;
}


DatabaseWithOwnTablesBase::DatabaseWithOwnTablesBase(const String & name_, const String & logger, ContextPtr context_)
: IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get(logger))
{
@ -14,6 +14,7 @@ namespace DB
{

void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemoryMetadata & metadata);
ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & ast_storage, bool only_ordinary, uint32_t max_parser_depth, bool throw_on_error);

class Context;
|
@ -113,53 +113,6 @@ StoragePtr DatabaseMySQL::tryGetTable(const String & mysql_table_name, ContextPt
|
||||
return StoragePtr{};
|
||||
}
|
||||
|
||||
static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr & database_engine_define)
|
||||
{
|
||||
auto create_table_query = std::make_shared<ASTCreateQuery>();
|
||||
|
||||
auto table_storage_define = database_engine_define->clone();
|
||||
create_table_query->set(create_table_query->storage, table_storage_define);
|
||||
|
||||
auto columns_declare_list = std::make_shared<ASTColumns>();
|
||||
auto columns_expression_list = std::make_shared<ASTExpressionList>();
|
||||
|
||||
columns_declare_list->set(columns_declare_list->columns, columns_expression_list);
|
||||
create_table_query->set(create_table_query->columns_list, columns_declare_list);
|
||||
|
||||
{
|
||||
/// init create query.
|
||||
auto table_id = storage->getStorageID();
|
||||
create_table_query->table = table_id.table_name;
|
||||
create_table_query->database = table_id.database_name;
|
||||
|
||||
auto metadata_snapshot = storage->getInMemoryMetadataPtr();
|
||||
for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary())
|
||||
{
|
||||
const auto & column_declaration = std::make_shared<ASTColumnDeclaration>();
|
||||
column_declaration->name = column_type_and_name.name;
|
||||
column_declaration->type = dataTypeConvertToQuery(column_type_and_name.type);
|
||||
columns_expression_list->children.emplace_back(column_declaration);
|
||||
}
|
||||
|
||||
ASTStorage * ast_storage = table_storage_define->as<ASTStorage>();
|
||||
ASTs storage_children = ast_storage->children;
|
||||
auto storage_engine_arguments = ast_storage->engine->arguments;
|
||||
|
||||
/// Add table_name to engine arguments
|
||||
auto mysql_table_name = std::make_shared<ASTLiteral>(table_id.table_name);
|
||||
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
|
||||
|
||||
/// Unset settings
|
||||
storage_children.erase(
|
||||
std::remove_if(storage_children.begin(), storage_children.end(),
|
||||
[&](const ASTPtr & element) { return element.get() == ast_storage->settings; }),
|
||||
storage_children.end());
|
||||
ast_storage->settings = nullptr;
|
||||
}
|
||||
|
||||
return create_table_query;
|
||||
}
|
||||
|
||||
ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, ContextPtr local_context, bool throw_on_error) const
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex);
|
||||
@ -174,7 +127,27 @@ ASTPtr DatabaseMySQL::getCreateTableQueryImpl(const String & table_name, Context
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return getCreateQueryFromStorage(local_tables_cache[table_name].second, database_engine_define);
|
||||
auto storage = local_tables_cache[table_name].second;
|
||||
auto table_storage_define = database_engine_define->clone();
|
||||
{
|
||||
ASTStorage * ast_storage = table_storage_define->as<ASTStorage>();
|
||||
ASTs storage_children = ast_storage->children;
|
||||
auto storage_engine_arguments = ast_storage->engine->arguments;
|
||||
|
||||
/// Add table_name to engine arguments
|
||||
auto mysql_table_name = std::make_shared<ASTLiteral>(table_name);
|
||||
storage_engine_arguments->children.insert(storage_engine_arguments->children.begin() + 2, mysql_table_name);
|
||||
|
||||
/// Unset settings
|
||||
storage_children.erase(
|
||||
std::remove_if(storage_children.begin(), storage_children.end(),
|
||||
[&](const ASTPtr & element) { return element.get() == ast_storage->settings; }),
|
||||
storage_children.end());
|
||||
ast_storage->settings = nullptr;
|
||||
}
|
||||
auto create_table_query = DB::getCreateQueryFromStorage(storage, table_storage_define, true,
|
||||
getContext()->getSettingsRef().max_parser_depth, throw_on_error);
|
||||
return create_table_query;
|
||||
}
|
||||
|
||||
time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_name) const
|
||||
@ -192,7 +165,7 @@ time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_nam
|
||||
ASTPtr DatabaseMySQL::getCreateDatabaseQuery() const
|
||||
{
|
||||
const auto & create_query = std::make_shared<ASTCreateQuery>();
|
||||
create_query->database = getDatabaseName();
|
||||
create_query->setDatabase(getDatabaseName());
|
||||
create_query->set(create_query->storage, database_engine_define);
|
||||
|
||||
if (const auto comment_value = getDatabaseComment(); !comment_value.empty())
|
||||
|
@ -3,26 +3,27 @@
#if USE_MYSQL

#include <Databases/MySQL/MaterializedMySQLSyncThread.h>
# include <cstdlib>
# include <random>
# include <Columns/ColumnTuple.h>
# include <Columns/ColumnDecimal.h>
# include <QueryPipeline/QueryPipelineBuilder.h>
# include <Processors/Executors/PullingPipelineExecutor.h>
# include <Processors/Executors/CompletedPipelineExecutor.h>
# include <Processors/Sources/SourceFromSingleChunk.h>
# include <Processors/Transforms/CountingTransform.h>
# include <Databases/MySQL/DatabaseMaterializedMySQL.h>
# include <Databases/MySQL/MaterializeMetadata.h>
# include <Processors/Sources/MySQLSource.h>
# include <IO/ReadBufferFromString.h>
# include <Interpreters/Context.h>
# include <Interpreters/executeQuery.h>
# include <Storages/StorageMergeTree.h>
# include <Common/quoteString.h>
# include <Common/setThreadName.h>
# include <base/sleep.h>
# include <base/bit_cast.h>
#include <cstdlib>
#include <random>
#include <string_view>
#include <Columns/ColumnTuple.h>
#include <Columns/ColumnDecimal.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Processors/Executors/PullingPipelineExecutor.h>
#include <Processors/Executors/CompletedPipelineExecutor.h>
#include <Processors/Sources/SourceFromSingleChunk.h>
#include <Processors/Transforms/CountingTransform.h>
#include <Databases/MySQL/DatabaseMaterializedMySQL.h>
#include <Databases/MySQL/MaterializeMetadata.h>
#include <Processors/Sources/MySQLSource.h>
#include <IO/ReadBufferFromString.h>
#include <Interpreters/Context.h>
#include <Interpreters/executeQuery.h>
#include <Storages/StorageMergeTree.h>
#include <Common/quoteString.h>
#include <Common/setThreadName.h>
#include <base/sleep.h>
#include <base/bit_cast.h>

namespace DB
{
@ -765,7 +766,7 @@ void MaterializedMySQLSyncThread::executeDDLAtomic(const QueryEvent & query_even

bool MaterializedMySQLSyncThread::isMySQLSyncThread()
{
return getThreadName() == MYSQL_BACKGROUND_THREAD_NAME;
return getThreadName() == std::string_view(MYSQL_BACKGROUND_THREAD_NAME);
}

void MaterializedMySQLSyncThread::setSynchronizationThreadException(const std::exception_ptr & exception)
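Aside, not part of the commit: the std::string_view wrapper above matters because getThreadName() now returns a const char *; comparing two raw C strings with == would compare pointers, not characters. A small illustration with made-up names:

    #include <string_view>

    constexpr const char * EXPECTED_NAME = "ExampleSyncThread"; // hypothetical constant

    bool is_sync_thread(const char * current_thread_name)
    {
        // Wrong: compares addresses, true only if both point at the same literal.
        // return current_thread_name == EXPECTED_NAME;

        // Right: string_view compares the characters.
        return std::string_view(current_thread_name) == EXPECTED_NAME;
    }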
@ -238,7 +238,7 @@ ASTPtr DatabaseMaterializedPostgreSQL::createAlterSettingsQuery(const SettingCha
auto * alter = query->as<ASTAlterQuery>();

alter->alter_object = ASTAlterQuery::AlterObjectType::DATABASE;
alter->database = database_name;
alter->setDatabase(database_name);
alter->set(alter->command_list, command_list);

return query;
Some files were not shown because too many files have changed in this diff.