Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-24 00:22:29 +00:00)

Commit 010ce20bd1: Merge branch 'master' into merge-tree-parallel-replicas-custom-key

.github/PULL_REQUEST_TEMPLATE.md (vendored, 5 changes)

@@ -48,9 +48,7 @@ At a minimum, the following information should be added (but add more as needed)
 - [ ] <!---ci_include_stateful--> Allow: Stateful tests
 - [ ] <!---ci_include_integration--> Allow: Integration Tests
 - [ ] <!---ci_include_performance--> Allow: Performance tests
-- [ ] <!---ci_set_normal_builds--> Allow: Normal Builds
-- [ ] <!---ci_set_special_builds--> Allow: Special Builds
-- [ ] <!---ci_set_non_required--> Allow: All NOT Required Checks
+- [ ] <!---ci_set_builds--> Allow: All Builds
 - [ ] <!---batch_0_1--> Allow: batch 1, 2 for multi-batch jobs
 - [ ] <!---batch_2_3--> Allow: batch 3, 4, 5, 6 for multi-batch jobs
 ---
@@ -61,6 +59,7 @@ At a minimum, the following information should be added (but add more as needed)
 - [ ] <!---ci_exclude_aarch64|release|debug--> Exclude: All with aarch64, release, debug
 ---
 - [ ] <!---do_not_test--> Do not test
+- [ ] <!---woolen_wolfdog--> Woolen Wolfdog
 - [ ] <!---upload_all--> Upload binaries for special builds
 - [ ] <!---no_merge_commit--> Disable merge-commit
 - [ ] <!---no_ci_cache--> Disable CI cache

.github/workflows/backport_branches.yml (vendored, 6 changes)

@@ -70,7 +70,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Compatibility check (amd64)
+      test_name: Compatibility check (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckAarch64:
@@ -194,7 +194,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (amd64)
+      test_name: Install packages (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -204,7 +204,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (arm64)
+      test_name: Install packages (aarch64)
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |

.github/workflows/master.yml (vendored, 20 changes)

@@ -104,10 +104,9 @@ jobs:
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  # stage for jobs that do not prohibit merge
   Tests_3:
     # Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
-    needs: [RunConfig, Builds_1, Builds_2]
+    needs: [RunConfig, Builds_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
@@ -115,25 +114,16 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   ################################# Reports #################################
-  # Reports should be run even if Builds_1/2 failed - put them separately in wf (not in Tests_1/2)
-  Builds_1_Report:
+  # Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
+  Builds_Report:
     # run report check for failed builds to indicate the CI error
     if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
-    needs: [RunConfig, Builds_1]
+    needs: [RunConfig, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse build check
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-  Builds_2_Report:
-    # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
-    needs: [RunConfig, Builds_2]
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickHouse special build check
-      runner_type: style-checker-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
 
   MarkReleaseReady:
     if: ${{ !failure() && !cancelled() }}
@@ -165,7 +155,7 @@ jobs:
 
   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code

.github/workflows/pull_request.yml (vendored, 31 changes)

@@ -126,16 +126,16 @@ jobs:
     with:
       stage: Builds_2
       data: ${{ needs.RunConfig.outputs.data }}
+  # stage for running non-required checks without being blocked by required checks (Test_1) if corresponding settings is selected
   Tests_2:
-    needs: [RunConfig, Builds_2]
+    needs: [RunConfig, Builds_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
       stage: Tests_2
       data: ${{ needs.RunConfig.outputs.data }}
-  # stage for jobs that do not prohibit merge
   Tests_3:
-    needs: [RunConfig, Builds_1, Tests_1, Builds_2, Tests_2]
+    needs: [RunConfig, Builds_1, Tests_1]
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
     uses: ./.github/workflows/reusable_test_stage.yml
     with:
@@ -143,29 +143,21 @@ jobs:
       data: ${{ needs.RunConfig.outputs.data }}
 
   ################################# Reports #################################
-  # Reports should by run even if Builds_1/2 fail, so put them separately in wf (not in Tests_1/2)
-  Builds_1_Report:
+  # Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
+  Builds_Report:
     # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
-    needs: [RunConfig, StyleCheck, Builds_1]
+    if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
+    needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse build check
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
-  Builds_2_Report:
-    # run report check for failed builds to indicate the CI error
-    if: ${{ !cancelled() && needs.StyleCheck.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse special build check') }}
-    needs: [RunConfig, StyleCheck, Builds_2]
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: ClickHouse special build check
-      runner_type: style-checker-aarch64
-      data: ${{ needs.RunConfig.outputs.data }}
 
   CheckReadyForMerge:
     if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2]
+    # Test_2 or Test_3 must not have jobs required for Mergeable check
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -181,7 +173,7 @@ jobs:
   #
   FinishCheck:
     if: ${{ !cancelled() }}
-    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_1_Report, Builds_2_Report, Tests_1, Tests_2, Tests_3]
+    needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2, Tests_3]
     runs-on: [self-hosted, style-checker-aarch64]
     steps:
       - name: Check out repository code
@@ -204,8 +196,7 @@ jobs:
     concurrency:
       group: jepsen
     if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }}
-    # jepsen needs binary_release build which is in Builds_2
-    needs: [RunConfig, Builds_2]
+    needs: [RunConfig, Builds_1]
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse Keeper Jepsen

.github/workflows/release_branches.yml (vendored, 6 changes)

@@ -65,7 +65,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Compatibility check (amd64)
+      test_name: Compatibility check (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
   CompatibilityCheckAarch64:
@@ -244,7 +244,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (amd64)
+      test_name: Install packages (release)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |
@@ -254,7 +254,7 @@ jobs:
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Install packages (arm64)
+      test_name: Install packages (aarch64)
       runner_type: style-checker-aarch64
       data: ${{ needs.RunConfig.outputs.data }}
       run_command: |

@@ -23,9 +23,6 @@
 #include <openssl/conf.h>
 #endif
 
-#if __has_feature(address_sanitizer)
-#include <sanitizer/lsan_interface.h>
-#endif
 
 using Poco::RandomInputStream;
 using Poco::Thread;
@@ -70,18 +67,12 @@ void OpenSSLInitializer::initialize()
     SSL_library_init();
     SSL_load_error_strings();
     OpenSSL_add_all_algorithms();
 
     char seed[SEEDSIZE];
     RandomInputStream rnd;
     rnd.read(seed, sizeof(seed));
-    {
-# if __has_feature(address_sanitizer)
-        /// Leak sanitizer (part of address sanitizer) thinks that a few bytes of memory in OpenSSL are allocated during but never released.
-        __lsan::ScopedDisabler lsan_disabler;
-#endif
-        RAND_seed(seed, SEEDSIZE);
-    }
+    RAND_seed(seed, SEEDSIZE);
 
     int nMutexes = CRYPTO_num_locks();
     _mutexes = new Poco::FastMutex[nMutexes];
     CRYPTO_set_locking_callback(&OpenSSLInitializer::lock);
@@ -89,8 +80,8 @@ void OpenSSLInitializer::initialize()
     // https://sourceforge.net/p/poco/bugs/110/
     //
     // From http://www.openssl.org/docs/crypto/threads.html :
     // "If the application does not register such a callback using CRYPTO_THREADID_set_callback(),
     // then a default implementation is used - on Windows and BeOS this uses the system's
     // default thread identifying APIs"
     CRYPTO_set_id_callback(&OpenSSLInitializer::id);
     CRYPTO_set_dynlock_create_callback(&OpenSSLInitializer::dynlockCreate);
@@ -109,7 +100,7 @@ void OpenSSLInitializer::uninitialize()
     CRYPTO_set_locking_callback(0);
     CRYPTO_set_id_callback(0);
     delete [] _mutexes;
 
     CONF_modules_free();
 }
 }

@@ -213,6 +213,7 @@ target_compile_definitions (_poco_foundation
 )
 
 target_include_directories (_poco_foundation SYSTEM PUBLIC "include")
+target_link_libraries (_poco_foundation PRIVATE clickhouse_common_io)
 
 target_link_libraries (_poco_foundation
     PRIVATE

@@ -48,7 +48,13 @@ class Foundation_API ThreadPool
     /// from the pool.
 {
 public:
-    ThreadPool(int minCapacity = 2, int maxCapacity = 16, int idleTime = 60, int stackSize = POCO_THREAD_STACK_SIZE);
+    explicit ThreadPool(
+        int minCapacity = 2,
+        int maxCapacity = 16,
+        int idleTime = 60,
+        int stackSize = POCO_THREAD_STACK_SIZE,
+        size_t global_profiler_real_time_period_ns_ = 0,
+        size_t global_profiler_cpu_time_period_ns_ = 0);
     /// Creates a thread pool with minCapacity threads.
     /// If required, up to maxCapacity threads are created
     /// a NoThreadAvailableException exception is thrown.
@@ -56,8 +62,14 @@ public:
     /// and more than minCapacity threads are running, the thread
     /// is killed. Threads are created with given stack size.
 
-    ThreadPool(
-        const std::string & name, int minCapacity = 2, int maxCapacity = 16, int idleTime = 60, int stackSize = POCO_THREAD_STACK_SIZE);
+    explicit ThreadPool(
+        const std::string & name,
+        int minCapacity = 2,
+        int maxCapacity = 16,
+        int idleTime = 60,
+        int stackSize = POCO_THREAD_STACK_SIZE,
+        size_t global_profiler_real_time_period_ns_ = 0,
+        size_t global_profiler_cpu_time_period_ns_ = 0);
     /// Creates a thread pool with the given name and minCapacity threads.
     /// If required, up to maxCapacity threads are created
     /// a NoThreadAvailableException exception is thrown.
@@ -171,6 +183,8 @@ private:
     int _serial;
     int _age;
     int _stackSize;
+    size_t _globalProfilerRealTimePeriodNs;
+    size_t _globalProfilerCPUTimePeriodNs;
     ThreadVec _threads;
     mutable FastMutex _mutex;
 };

@@ -20,6 +20,7 @@
 #include "Poco/ErrorHandler.h"
 #include <sstream>
 #include <ctime>
+#include <Common/ThreadPool.h>
 
 
 namespace Poco {
@@ -28,7 +29,11 @@ namespace Poco {
 class PooledThread: public Runnable
 {
 public:
-    PooledThread(const std::string& name, int stackSize = POCO_THREAD_STACK_SIZE);
+    explicit PooledThread(
+        const std::string& name,
+        int stackSize = POCO_THREAD_STACK_SIZE,
+        size_t globalProfilerRealTimePeriodNs_ = 0,
+        size_t globalProfilerCPUTimePeriodNs_ = 0);
     ~PooledThread();
 
     void start();
@@ -51,16 +56,24 @@ private:
     Event _targetCompleted;
     Event _started;
     FastMutex _mutex;
+    size_t _globalProfilerRealTimePeriodNs;
+    size_t _globalProfilerCPUTimePeriodNs;
 };
 
 
-PooledThread::PooledThread(const std::string& name, int stackSize):
+PooledThread::PooledThread(
+    const std::string& name,
+    int stackSize,
+    size_t globalProfilerRealTimePeriodNs_,
+    size_t globalProfilerCPUTimePeriodNs_) :
     _idle(true),
    _idleTime(0),
    _pTarget(0),
    _name(name),
    _thread(name),
-    _targetCompleted(false)
+    _targetCompleted(false),
+    _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+    _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
 {
     poco_assert_dbg (stackSize >= 0);
     _thread.setStackSize(stackSize);
@@ -83,7 +96,7 @@ void PooledThread::start()
 void PooledThread::start(Thread::Priority priority, Runnable& target)
 {
     FastMutex::ScopedLock lock(_mutex);
 
     poco_assert (_pTarget == 0);
 
     _pTarget = &target;
@@ -109,7 +122,7 @@ void PooledThread::start(Thread::Priority priority, Runnable& target, const std:
     }
     _thread.setName(fullName);
     _thread.setPriority(priority);
 
     poco_assert (_pTarget == 0);
 
     _pTarget = &target;
@@ -145,7 +158,7 @@ void PooledThread::join()
 void PooledThread::activate()
 {
     FastMutex::ScopedLock lock(_mutex);
 
     poco_assert (_idle);
     _idle = false;
     _targetCompleted.reset();
@@ -155,7 +168,7 @@ void PooledThread::activate()
 void PooledThread::release()
 {
     const long JOIN_TIMEOUT = 10000;
 
     _mutex.lock();
     _pTarget = 0;
     _mutex.unlock();
@@ -174,6 +187,10 @@ void PooledThread::release()
 
 void PooledThread::run()
 {
+    DB::ThreadStatus thread_status;
+    if (unlikely(_globalProfilerRealTimePeriodNs != 0 || _globalProfilerCPUTimePeriodNs != 0))
+        thread_status.initGlobalProfiler(_globalProfilerRealTimePeriodNs, _globalProfilerCPUTimePeriodNs);
+
     _started.set();
     for (;;)
     {
@@ -220,13 +237,17 @@ void PooledThread::run()
 ThreadPool::ThreadPool(int minCapacity,
     int maxCapacity,
     int idleTime,
-    int stackSize):
+    int stackSize,
+    size_t globalProfilerRealTimePeriodNs_,
+    size_t globalProfilerCPUTimePeriodNs_) :
     _minCapacity(minCapacity),
     _maxCapacity(maxCapacity),
     _idleTime(idleTime),
     _serial(0),
     _age(0),
-    _stackSize(stackSize)
+    _stackSize(stackSize),
+    _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+    _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
 {
     poco_assert (minCapacity >= 1 && maxCapacity >= minCapacity && idleTime > 0);
 
@@ -243,14 +264,18 @@ ThreadPool::ThreadPool(const std::string& name,
     int minCapacity,
     int maxCapacity,
     int idleTime,
-    int stackSize):
+    int stackSize,
+    size_t globalProfilerRealTimePeriodNs_,
+    size_t globalProfilerCPUTimePeriodNs_) :
     _name(name),
     _minCapacity(minCapacity),
     _maxCapacity(maxCapacity),
     _idleTime(idleTime),
     _serial(0),
     _age(0),
-    _stackSize(stackSize)
+    _stackSize(stackSize),
+    _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+    _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
 {
     poco_assert (minCapacity >= 1 && maxCapacity >= minCapacity && idleTime > 0);
 
@@ -393,15 +418,15 @@ void ThreadPool::housekeep()
     ThreadVec activeThreads;
     idleThreads.reserve(_threads.size());
     activeThreads.reserve(_threads.size());
 
     for (ThreadVec::iterator it = _threads.begin(); it != _threads.end(); ++it)
     {
         if ((*it)->idle())
         {
             if ((*it)->idleTime() < _idleTime)
                 idleThreads.push_back(*it);
             else
                 expiredThreads.push_back(*it);
         }
         else activeThreads.push_back(*it);
     }
@@ -463,7 +488,7 @@ PooledThread* ThreadPool::createThread()
 {
     std::ostringstream name;
     name << _name << "[#" << ++_serial << "]";
-    return new PooledThread(name.str(), _stackSize);
+    return new PooledThread(name.str(), _stackSize, _globalProfilerRealTimePeriodNs, _globalProfilerCPUTimePeriodNs);
 }
 
 
@@ -481,7 +506,7 @@ public:
     ThreadPool* pool()
     {
         FastMutex::ScopedLock lock(_mutex);
 
         if (!_pPool)
         {
             _pPool = new ThreadPool("default");
@@ -490,7 +515,7 @@ public:
         }
         return _pPool;
     }
 
 private:
     ThreadPool* _pPool;
     FastMutex _mutex;

@@ -26,7 +26,7 @@ namespace Poco
 {
 namespace Net
 {
-    constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 8 * 1024;
+    constexpr size_t HTTP_DEFAULT_BUFFER_SIZE = 1024 * 1024;
 
     typedef Poco::BasicBufferedStreamBuf<char, std::char_traits<char>> HTTPBasicStreamBuf;
 

@@ -330,27 +330,26 @@ void SSLManager::initDefaultContext(bool server)
     else
         _ptrDefaultClientContext->disableProtocols(disabledProtocols);
 
-    /// Temporarily disabled during the transition from boringssl to OpenSSL due to tsan issues.
-    /// bool cacheSessions = config.getBool(prefix + CFG_CACHE_SESSIONS, false);
-    /// if (server)
-    /// {
-    ///     std::string sessionIdContext = config.getString(prefix + CFG_SESSION_ID_CONTEXT, config.getString("application.name", ""));
-    ///     _ptrDefaultServerContext->enableSessionCache(cacheSessions, sessionIdContext);
-    ///     if (config.hasProperty(prefix + CFG_SESSION_CACHE_SIZE))
-    ///     {
-    ///         int cacheSize = config.getInt(prefix + CFG_SESSION_CACHE_SIZE);
-    ///         _ptrDefaultServerContext->setSessionCacheSize(cacheSize);
-    ///     }
-    ///     if (config.hasProperty(prefix + CFG_SESSION_TIMEOUT))
-    ///     {
-    ///         int timeout = config.getInt(prefix + CFG_SESSION_TIMEOUT);
-    ///         _ptrDefaultServerContext->setSessionTimeout(timeout);
-    ///     }
-    /// }
-    /// else
-    /// {
-    ///     _ptrDefaultClientContext->enableSessionCache(cacheSessions);
-    /// }
+    bool cacheSessions = config.getBool(prefix + CFG_CACHE_SESSIONS, false);
+    if (server)
+    {
+        std::string sessionIdContext = config.getString(prefix + CFG_SESSION_ID_CONTEXT, config.getString("application.name", ""));
+        _ptrDefaultServerContext->enableSessionCache(cacheSessions, sessionIdContext);
+        if (config.hasProperty(prefix + CFG_SESSION_CACHE_SIZE))
+        {
+            int cacheSize = config.getInt(prefix + CFG_SESSION_CACHE_SIZE);
+            _ptrDefaultServerContext->setSessionCacheSize(cacheSize);
+        }
+        if (config.hasProperty(prefix + CFG_SESSION_TIMEOUT))
+        {
+            int timeout = config.getInt(prefix + CFG_SESSION_TIMEOUT);
+            _ptrDefaultServerContext->setSessionTimeout(timeout);
+        }
+    }
+    else
+    {
+        _ptrDefaultClientContext->enableSessionCache(cacheSessions);
+    }
 
     bool extendedVerification = config.getBool(prefix + CFG_EXTENDED_VERIFICATION, false);
     if (server)
         _ptrDefaultServerContext->enableExtendedCertificateVerification(extendedVerification);

contrib/openssl (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 67c0b63e578e4c751ac9edf490f5a96124fff8dc
+Subproject commit 277de2ba202af4eb2291b363456d32ff0960e559

@@ -254,7 +254,7 @@ function run_tests()
 
     set +e
     clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
-        --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+        --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
         | ts '%Y-%m-%d %H:%M:%S' \
         | tee -a test_output/test_result.txt
     set -e
@@ -379,6 +379,10 @@ fi
 
 tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
 
+rm -rf /var/lib/clickhouse/data/system/*/
+tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
+tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
+
 if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
     rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
     rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:

@@ -37,7 +37,7 @@ Using named collections:
     <named_collections>
         <iceberg_conf>
             <url>http://test.s3.amazonaws.com/clickhouse-bucket/</url>
-            <access_key_id>test<access_key_id>
+            <access_key_id>test</access_key_id>
             <secret_access_key>test</secret_access_key>
         </iceberg_conf>
     </named_collections>

@@ -13,7 +13,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
 CREATE TABLE s3_queue_engine_table (name String, value UInt32)
     ENGINE = S3Queue(path, [NOSIGN, | aws_access_key_id, aws_secret_access_key,] format, [compression])
     [SETTINGS]
-    [mode = 'unordered',]
+    [mode = '',]
     [after_processing = 'keep',]
     [keeper_path = '',]
     [s3queue_loading_retries = 0,]

@@ -31,6 +31,56 @@ Alternatively, in order to enable the MySQL interface for an existing service:
 3. After entering the password, you will get prompted the MySQL connection string for this service
 ![Connection screen - MySQL Enabled](./images/mysql5.png)
 
+## Creating multiple MySQL users in ClickHouse Cloud
+
+By default, there is a built-in `mysql4<subdomain>` user, which uses the same password as the `default` one. The `<subdomain>` part is the first segment of your ClickHouse Cloud hostname. This format is necessary to work with the tools that implement secure connection, but don't provide [SNI information in their TLS handshake](https://www.cloudflare.com/learning/ssl/what-is-sni), which makes it impossible to do the internal routing without an extra hint in the username (MySQL console client is one of such tools).
+
+Because of this, we _highly recommend_ following the `mysql4<subdomain>_<username>` format when creating a new user intended to be used with the MySQL interface, where `<subdomain>` is a hint to identify your Cloud service, and `<username>` is an arbitrary suffix of your choice.
+
+:::tip
+For ClickHouse Cloud hostname like `foobar.us-east1.aws.clickhouse.cloud`, the `<subdomain>` part equals to `foobar`, and a custom MySQL username could look like `mysql4foobar_team1`.
+:::
+
+You can create extra users to use with the MySQL interface if, for example, you need to apply extra settings.
+
+1. Optional - create a [settings profile](https://clickhouse.com/docs/en/sql-reference/statements/create/settings-profile) to apply for your custom user. For example, `my_custom_profile` with an extra setting which will be applied by default when we connect with the user we create later:
+
+    ```sql
+    CREATE SETTINGS PROFILE my_custom_profile SETTINGS prefer_column_name_to_alias=1;
+    ```
+
+    `prefer_column_name_to_alias` is used just as an example, you can use other settings there.
+2. [Create a user](https://clickhouse.com/docs/en/sql-reference/statements/create/user) using the following format: `mysql4<subdomain>_<username>` ([see above](#creating-multiple-mysql-users-in-clickhouse-cloud)). The password must be in double SHA1 format. For example:
+
+    ```sql
+    CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$';
+    ```
+
+    or if you want to use a custom profile for this user:
+
+    ```sql
+    CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$' SETTINGS PROFILE 'my_custom_profile';
+    ```
+
+    where `my_custom_profile` is the name of the profile you created earlier.
+3. [Grant](https://clickhouse.com/docs/en/sql-reference/statements/grant) the new user the necessary permissions to interact with the desired tables or databases. For example, if you want to grant access to `system.query_log` only:
+
+    ```sql
+    GRANT SELECT ON system.query_log TO mysql4foobar_team1;
+    ```
+
+4. Use the created user to connect to your ClickHouse Cloud service with the MySQL interface.
+
+### Troubleshooting multiple MySQL users in ClickHouse Cloud
+
+If you created a new MySQL user, and you see the following error while connecting via MySQL CLI client:
+
+```
+ERROR 2013 (HY000): Lost connection to MySQL server at 'reading authorization packet', system error: 54
+```
+
+In this case, ensure that the username follows the `mysql4<subdomain>_<username>` format, as described ([above](#creating-multiple-mysql-users-in-clickhouse-cloud)).
+
 ## Enabling the MySQL Interface On Self-managed ClickHouse
 
 Add the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d/` [folder](../operations/configuration-files):

@@ -59,10 +59,10 @@ For that, we need to use `jemalloc`'s tool called [jeprof](https://github.com/je
 If that’s the case, we recommend installing an [alternative implementation](https://github.com/gimli-rs/addr2line) of the tool.
 
 ```
-git clone https://github.com/gimli-rs/addr2line
+git clone https://github.com/gimli-rs/addr2line.git --depth=1 --branch=0.23.0
 cd addr2line
-cargo b --examples -r
-cp ./target/release/examples/addr2line path/to/current/addr2line
+cargo build --features bin --release
+cp ./target/release/addr2line path/to/current/addr2line
 ```
 :::
 

@@ -591,6 +591,22 @@ Default value: 100000
 <max_part_num_to_warn>400</max_part_num_to_warn>
 ```
 
+## max\_table\_num\_to\_throw {#max-table-num-to-throw}
+If number of tables is greater than this value, server will throw an exception. 0 means no limitation. View, remote tables, dictionary, system tables are not counted. Only count table in Atomic/Ordinary/Replicated/Lazy database engine. Default value: 0
+
+**Example**
+```xml
+<max_table_num_to_throw>400</max_table_num_to_throw>
+```
+
+## max\_database\_num\_to\_throw {#max-table-num-to-throw}
+If number of databases is greater than this value, server will throw an exception. 0 means no limitation.
+Default value: 0
+
+**Example**
+```xml
+<max_database_num_to_throw>400</max_database_num_to_throw>
+```
+
 ## max_temporary_data_on_disk_size
 
@ -3084,3 +3100,21 @@ This setting is only necessary for the migration period and will become obsolete
|
|||||||
Type: Bool
|
Type: Bool
|
||||||
|
|
||||||
Default: 1
|
Default: 1
|
||||||
|
|
||||||
|
## merge_workload {#merge_workload}
|
||||||
|
|
||||||
|
Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for all background merges. Can be overridden by a merge tree setting.
|
||||||
|
|
||||||
|
Default value: "default"
|
||||||
|
|
||||||
|
**See Also**
|
||||||
|
- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
|
||||||
|
|
||||||
|
## mutation_workload {#mutation_workload}
|
||||||
|
|
||||||
|
Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for all background mutations. Can be overridden by a merge tree setting.
|
||||||
|
|
||||||
|
Default value: "default"
|
||||||
|
|
||||||
|
**See Also**
|
||||||
|
- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
|
||||||

@@ -974,6 +974,24 @@ Default value: false
 
 - [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
 
+## merge_workload
+
+Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for background merges of this table. If not specified (empty string), then server setting `merge_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+## mutation_workload
+
+Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for background mutations of this table. If not specified (empty string), then server setting `mutation_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
 ### optimize_row_order
 
 Controls if the row order should be optimized during inserts to improve the compressability of the newly inserted table part.

@@ -1592,19 +1592,19 @@ Default value: `default`.
 
 ## parallel_replicas_custom_key_range_lower {#parallel_replicas_custom_key_range_lower}
 
 Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.
 
 When used in conjuction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
 
 Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
 
 ## parallel_replicas_custom_key_range_upper {#parallel_replicas_custom_key_range_upper}
 
 Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it the max value of the custom key expression.
 
 When used in conjuction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
 
 Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
 
 ## allow_experimental_parallel_reading_from_replicas
 
@ -3188,7 +3188,7 @@ Default value: `0`.
|
|||||||
|
|
||||||
## lightweight_deletes_sync {#lightweight_deletes_sync}
|
## lightweight_deletes_sync {#lightweight_deletes_sync}
|
||||||
|
|
||||||
The same as 'mutation_sync', but controls only execution of lightweight deletes.
|
The same as 'mutation_sync', but controls only execution of lightweight deletes.
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -5150,7 +5150,7 @@ Allows using statistic to optimize the order of [prewhere conditions](../../sql-
|
|||||||
|
|
||||||
## analyze_index_with_space_filling_curves
|
## analyze_index_with_space_filling_curves
|
||||||
|
|
||||||
If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis.
|
If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)` or `ORDER BY hilbertEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis.
|
||||||
|
|
||||||
## query_plan_enable_optimizations {#query_plan_enable_optimizations}
|
## query_plan_enable_optimizations {#query_plan_enable_optimizations}
|
||||||
|
|
||||||
@ -5418,6 +5418,12 @@ When set to `false` than all attempts are made with identical timeouts.
|
|||||||
|
|
||||||
Default value: `true`.
|
Default value: `true`.
|
||||||
|
|
||||||
|
## uniform_snowflake_conversion_functions {#uniform_snowflake_conversion_functions}
|
||||||
|
|
||||||
|
If set to `true`, then functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` are enabled, and functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake` are disabled (and vice versa if set to `false`).
|
||||||
|
|
||||||
|
Default value: `true`
|
||||||
|
|
||||||
## allow_experimental_variant_type {#allow_experimental_variant_type}
|
## allow_experimental_variant_type {#allow_experimental_variant_type}
|
||||||
|
|
||||||
Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
|
Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
|
||||||
|

@@ -113,6 +113,8 @@ Columns:
 - `used_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `functions`, which were used during query execution.
 - `used_storages` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `storages`, which were used during query execution.
 - `used_table_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `table functions`, which were used during query execution.
+- `used_privileges` ([Array(String)](../../sql-reference/data-types/array.md)) - Privileges which were successfully checked during query execution.
+- `missing_privileges` ([Array(String)](../../sql-reference/data-types/array.md)) - Privileges that are missing during query execution.
 - `query_cache_usage` ([Enum8](../../sql-reference/data-types/enum.md)) — Usage of the [query cache](../query-cache.md) during query execution. Values:
     - `'Unknown'` = Status unknown.
     - `'None'` = The query result was neither written into nor read from the query cache.
@@ -194,6 +196,8 @@ used_formats: []
 used_functions: []
 used_storages: []
 used_table_functions: []
+used_privileges: []
+missing_privileges: []
 query_cache_usage: None
 ```
 

@@ -47,6 +47,8 @@ Example:
 
 Queries can be marked with setting `workload` to distinguish different workloads. If `workload` is not set, than value "default" is used. Note that you are able to specify the other value using settings profiles. Setting constraints can be used to make `workload` constant if you want all queries from the user to be marked with fixed value of `workload` setting.
 
+It is possible to assign a `workload` setting for background activities. Merges and mutations are using `merge_workload` and `mutation_workload` server settings correspondingly. These values can also be overridden for specific tables using `merge_workload` and `mutation_workload` merge tree settings
+
 Let's consider an example of a system with two different workloads: "production" and "development".
 
 ```sql
@@ -151,6 +153,9 @@ Example:
 </clickhouse>
 ```
 
 ## See also
 - [system.scheduler](/docs/en/operations/system-tables/scheduler.md)
+- [merge_workload](/docs/en/operations/settings/merge-tree-settings.md#merge_workload) merge tree setting
+- [merge_workload](/docs/en/operations/server-configuration-parameters/settings.md#merge_workload) global server setting
+- [mutation_workload](/docs/en/operations/settings/merge-tree-settings.md#mutation_workload) merge tree setting
+- [mutation_workload](/docs/en/operations/server-configuration-parameters/settings.md#mutation_workload) global server setting

@@ -0,0 +1,90 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/groupconcat
+sidebar_position: 363
+sidebar_label: groupConcat
+title: groupConcat
+---
+
+Calculates a concatenated string from a group of strings, optionally separated by a delimiter, and optionally limited by a maximum number of elements.
+
+**Syntax**
+
+``` sql
+groupConcat(expression [, delimiter] [, limit]);
+```
+
+**Arguments**
+
+- `expression` — The expression or column name that outputs strings to be concatenated.
+- `delimiter` — A [string](../../../sql-reference/data-types/string.md) that will be used to separate concatenated values. This parameter is optional and defaults to an empty string if not specified.
+- `limit` — A positive [integer](../../../sql-reference/data-types/int-uint.md) specifying the maximum number of elements to concatenate. If more elements are present, excess elements are ignored. This parameter is optional.
+
+:::note
+If delimiter is specified without limit, it must be the first parameter following the expression. If both delimiter and limit are specified, delimiter must precede limit.
+:::
+
+**Returned value**
+
+- Returns a [string](../../../sql-reference/data-types/string.md) consisting of the concatenated values of the column or expression. If the group has no elements or only null elements, and the function does not specify a handling for only null values, the result is a nullable string with a null value.
+
+**Examples**
+
+Input table:
+
+``` text
+┌─id─┬─name─┐
+│  1 │ John │
+│  2 │ Jane │
+│  3 │ Bob  │
+└────┴──────┘
+```
+
+1. Basic usage without a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name) FROM Employees;
+```
+
+Result:
+
+``` text
+JohnJaneBob
+```
+
+This concatenates all names into one continuous string without any separator.
+
+2. Using comma as a delimiter:
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ') FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane, Bob
+```
+
+This output shows the names separated by a comma followed by a space.
+
+3. Limiting the number of concatenated elements
+
+Query:
+
+``` sql
+SELECT groupConcat(Name, ', ', 2) FROM Employees;
+```
+
+Result:
+
+``` text
+John, Jane
+```
+
+This query limits the output to the first two names, even though there are more names in the table.
@ -137,7 +137,7 @@ If the time transition (due to daylight saving time or for other reasons) was pe

Non-monotonic calendar dates. For example, in Happy Valley - Goose Bay, the time was transitioned one hour backwards at 00:01:00 7 Nov 2010 (one minute after midnight). So after 6th Nov has ended, people observed a whole one minute of 7th Nov, then time was changed back to 23:01 6th Nov and after another 59 minutes the 7th Nov started again. ClickHouse does not (yet) support this kind of fun. During these days the results of time processing functions may be slightly incorrect.
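
The behavior on such a day can be probed directly; a sketch, assuming the `America/Goose_Bay` tzdata zone (a wall-clock time inside the repeated 23:01–00:01 window occurred twice, and which of the two instants ClickHouse resolves it to is implementation-defined):

```sql
SELECT toDateTime('2010-11-06 23:30:00', 'America/Goose_Bay') AS ambiguous_local_time;
```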
Similar issue exists for Casey Antarctic station in year 2010. They changed time three hours back at 5 Mar, 02:00. If you are working in antarctic station, please don't afraid to use ClickHouse. Just make sure you set timezone to UTC or be aware of inaccuracies.
Similar issue exists for Casey Antarctic station in year 2010. They changed time three hours back at 5 Mar, 02:00. If you are working in antarctic station, please don't be afraid to use ClickHouse. Just make sure you set timezone to UTC or be aware of inaccuracies.
Time shifts for multiple days. Some pacific islands changed their timezone offset from UTC+14 to UTC-12. That's alright, but some inaccuracies may be present if you do calculations with their timezone for historical time points at the days of conversion.
@ -2178,6 +2178,32 @@ Result:

Alias: levenshteinDistance

## editDistanceUTF8

Calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings.

**Syntax**

```sql
editDistanceUTF8(string1, string2)
```

**Examples**

``` sql
SELECT editDistanceUTF8('我是谁', '我是我');
```

Result:

``` text
┌─editDistanceUTF8('我是谁', '我是我')──┐
│                                     1 │
└───────────────────────────────────────┘
```

Alias: levenshteinDistanceUTF8
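
Since `editDistance` works on bytes while `editDistanceUTF8` works on code points, the two can disagree on multi-byte input; a comparison sketch (each of these characters presumably occupies three bytes in UTF-8, so the byte-level variant is expected to report a larger distance):

```sql
SELECT
    editDistance('我是谁', '我是我')     AS byte_level,      -- counts byte edits
    editDistanceUTF8('我是谁', '我是我') AS codepoint_level; -- counts character edits
```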

## damerauLevenshteinDistance

Calculates the [Damerau-Levenshtein distance](https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance) between two byte strings.
@ -543,12 +543,17 @@ serverUUID()

Generates a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID).

The generated Snowflake ID contains the current Unix timestamp in milliseconds 41 (+ 1 top zero bit) bits, followed by machine id (10 bits), a counter (12 bits) to distinguish IDs within a millisecond.
The generated Snowflake ID contains the current Unix timestamp in milliseconds (41 + 1 top zero bits), followed by a machine id (10 bits), and a counter (12 bits) to distinguish IDs within a millisecond.
For any given timestamp (unix_ts_ms), the counter starts at 0 and is incremented by 1 for each new Snowflake ID until the timestamp changes.
In case the counter overflows, the timestamp field is incremented by 1 and the counter is reset to 0.

Function `generateSnowflakeID` guarantees that the counter field within a timestamp increments monotonically across all function invocations in concurrently running threads and queries.
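
This guarantee can be probed from SQL; a sketch (the distinct dummy arguments merely prevent the two calls from being collapsed into a single common subexpression):

```sql
SELECT generateSnowflakeID(1) < generateSnowflakeID(2) AS strictly_increasing; -- expected: 1
```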
:::note
The generated Snowflake IDs are based on the UNIX epoch 1970-01-01.
While no standard or recommendation exists for the epoch of Snowflake IDs, implementations in other systems may use a different epoch, e.g. Twitter/X (2010-11-04) or Mastodon (2015-01-01).
:::

```
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
```
@ -605,6 +610,11 @@ SELECT generateSnowflakeID(1), generateSnowflakeID(2);

## snowflakeToDateTime

:::warning
This function is deprecated and can only be used if setting [uniform_snowflake_conversion_functions](../../operations/settings/settings.md#uniform_snowflake_conversion_functions) is disabled.
The function will be removed at some point in the future.
:::
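
If the legacy function is still needed, the warning above implies it can be re-enabled per session by turning the setting off; a minimal sketch, assuming the setting accepts 0/1:

```sql
SET uniform_snowflake_conversion_functions = 0; -- allow the deprecated conversion functions
```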

Extracts the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime](../data-types/datetime.md) format.

**Syntax**
@ -641,6 +651,11 @@ Result:

## snowflakeToDateTime64

:::warning
This function is deprecated and can only be used if setting [uniform_snowflake_conversion_functions](../../operations/settings/settings.md#uniform_snowflake_conversion_functions) is disabled.
The function will be removed at some point in the future.
:::

Extracts the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime64](../data-types/datetime64.md) format.

**Syntax**
@ -677,6 +692,11 @@ Result:

## dateTimeToSnowflake

:::warning
This function is deprecated and can only be used if setting [uniform_snowflake_conversion_functions](../../operations/settings/settings.md#uniform_snowflake_conversion_functions) is disabled.
The function will be removed at some point in the future.
:::

Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

**Syntax**
@ -711,6 +731,11 @@ Result:

## dateTime64ToSnowflake

:::warning
This function is deprecated and can only be used if setting [uniform_snowflake_conversion_functions](../../operations/settings/settings.md#uniform_snowflake_conversion_functions) is disabled.
The function will be removed at some point in the future.
:::

Converts a [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

**Syntax**
@ -743,6 +768,148 @@ Result:
└─────────────────────────────┘
```

## snowflakeIDToDateTime

Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime](../data-types/datetime.md).

**Syntax**

``` sql
snowflakeIDToDateTime(value[, epoch[, time_zone]])
```

**Arguments**

- `value` — Snowflake ID. [UInt64](../data-types/int-uint.md).
- `epoch` — Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The extracted timestamp is returned in this timezone. Optional. [String](../data-types/string.md).

**Returned value**

- The timestamp component of `value` as a [DateTime](../data-types/datetime.md) value.

**Example**

Query:

```sql
SELECT snowflakeIDToDateTime(7204436857747984384) AS res
```

Result:

```
┌─────────────────res─┐
│ 2024-06-06 10:59:58 │
└─────────────────────┘
```
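
For IDs minted against a non-zero epoch, pass the epoch explicitly; a sketch reusing the ID above with the Twitter/X epoch (the extracted timestamp is then shifted forward by that offset):

```sql
SELECT snowflakeIDToDateTime(7204436857747984384, 1288834974657) AS res;
```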

## snowflakeIDToDateTime64

Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime64](../data-types/datetime64.md).

**Syntax**

``` sql
snowflakeIDToDateTime64(value[, epoch[, time_zone]])
```

**Arguments**

- `value` — Snowflake ID. [UInt64](../data-types/int-uint.md).
- `epoch` — Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The extracted timestamp is returned in this timezone. Optional. [String](../data-types/string.md).

**Returned value**

- The timestamp component of `value` as a [DateTime64](../data-types/datetime64.md) with scale = 3, i.e. millisecond precision.

**Example**

Query:

```sql
SELECT snowflakeIDToDateTime64(7204436857747984384) AS res
```

Result:

```
┌─────────────────res─┐
│ 2024-06-06 10:59:58 │
└─────────────────────┘
```
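
The optional `time_zone` argument changes how the extracted timestamp is rendered; a sketch reusing the documented ID (the output shifts with the chosen zone):

```sql
SELECT snowflakeIDToDateTime64(7204436857747984384, 0, 'Asia/Shanghai') AS res_shanghai;
```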

## dateTimeToSnowflakeID

Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

**Syntax**

``` sql
dateTimeToSnowflakeID(value[, epoch])
```

**Arguments**

- `value` — Date with time. [DateTime](../data-types/datetime.md).
- `epoch` — Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).

**Returned value**

- Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.

**Example**

Query:

```sql
SELECT toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt, dateTimeToSnowflakeID(dt) AS res;
```

Result:

```
┌──────────────────dt─┬─────────────────res─┐
│ 2021-08-15 18:57:56 │ 6832626392367104000 │
└─────────────────────┴─────────────────────┘
```
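
A quick sanity check is to round-trip a value through both directions; a sketch using the documented example timestamp (expected to return 1, since a `DateTime` has no sub-second part to lose):

```sql
WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt
SELECT snowflakeIDToDateTime(dateTimeToSnowflakeID(dt), 0, 'Asia/Shanghai') = dt AS round_trip_ok;
```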

## dateTime64ToSnowflakeID

Converts a [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.

**Syntax**

``` sql
dateTime64ToSnowflakeID(value[, epoch])
```

**Arguments**

- `value` — Date with time. [DateTime64](../data-types/datetime64.md).
- `epoch` — Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).

**Returned value**

- Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.

**Example**

Query:

```sql
SELECT toDateTime64('2021-08-15 18:57:56.493', 3, 'Asia/Shanghai') AS dt, dateTime64ToSnowflakeID(dt) AS res;
```

Result:

```
┌──────────────────────dt─┬─────────────────res─┐
│ 2021-08-15 18:57:56.493 │ 6832626394434895872 │
└─────────────────────────┴─────────────────────┘
```
## See also

- [dictGetUUID](../functions/ext-dict-functions.md#ext_dict_functions-other)
@ -10,6 +10,7 @@
#include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <Poco/Config.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/phdr_cache.h>
@ -721,11 +722,6 @@ try
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());

Poco::ThreadPool server_pool(3, server_settings.max_connections);
std::mutex servers_lock;
std::vector<ProtocolServerAdapter> servers;
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;

/** Context contains all that query execution is dependent:
* settings, available functions, data types, aggregate functions, databases, ...
*/
@ -823,6 +819,18 @@ try
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
}

Poco::ThreadPool server_pool(
/* minCapacity */3,
/* maxCapacity */server_settings.max_connections,
/* idleTime */60,
/* stackSize */POCO_THREAD_STACK_SIZE,
server_settings.global_profiler_real_time_period_ns,
server_settings.global_profiler_cpu_time_period_ns);

std::mutex servers_lock;
std::vector<ProtocolServerAdapter> servers;
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;

/// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
SCOPE_EXIT({
Stopwatch watch;
@ -1609,6 +1617,10 @@ try
0, // We don't need any threads once all the parts will be deleted
new_server_settings.max_parts_cleaning_thread_pool_size);

global_context->setMergeWorkload(new_server_settings.merge_workload);
global_context->setMutationWorkload(new_server_settings.mutation_workload);

if (config->has("resources"))
{
global_context->getResourceManager()->updateConfiguration(*config);
@ -371,7 +371,7 @@
<!-- Enables asynchronous loading of databases and tables to speedup server startup.
Queries to not yet loaded entity will be blocked until load is finished.
-->
<!-- <async_load_databases>true</async_load_databases> -->
<async_load_databases>true</async_load_databases>

<!-- On memory constrained environments you may have to set this to value larger than 1.
-->
@ -1396,6 +1396,14 @@
<!-- <host_name>replica</host_name> -->
</distributed_ddl>

<!-- Used to regulate how resources are utilized and shared between merges, mutations and other workloads.
Specified value is used as `workload` setting value for background merge or mutation.
-->
<!--
<merge_workload>merges_and_mutations</merge_workload>
<mutation_workload>merges_and_mutations</mutation_workload>
-->

<!-- Settings to fine-tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
<!--
<merge_tree>
@ -4,12 +4,12 @@

namespace DB
{
CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_)
CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, AccessFlags access_flags_)
: CachedAccessChecking(access_, AccessRightsElement{access_flags_})
{
}

CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_)
CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, const AccessRightsElement & element_)
: access(access_), element(element_)
{
}
@ -1,6 +1,7 @@
#pragma once

#include <Access/Common/AccessRightsElement.h>
#include <Access/ContextAccess.h>
#include <memory>
@ -13,14 +14,14 @@ class ContextAccess;
class CachedAccessChecking
{
public:
CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_);
CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, AccessFlags access_flags_);
CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_);
CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, const AccessRightsElement & element_);
~CachedAccessChecking();

bool checkAccess(bool throw_if_denied = true);

private:
const std::shared_ptr<const ContextAccess> access;
const std::shared_ptr<const ContextAccessWrapper> access;
const AccessRightsElement element;
bool checked = false;
bool result = false;
@ -20,6 +20,7 @@
#include <boost/algorithm/string/join.hpp>
#include <boost/range/algorithm/set_algorithm.hpp>
#include <cassert>
#include <unordered_set>


namespace DB
@ -271,7 +272,7 @@ namespace

std::shared_ptr<const ContextAccess> ContextAccess::fromContext(const ContextPtr & context)
{
return context->getAccess();
return ContextAccessWrapper::fromContext(context)->getAccess();
}

@ -560,7 +561,7 @@ std::shared_ptr<const AccessRights> ContextAccess::getAccessRightsWithImplicit()

template <bool throw_if_denied, bool grant_option, typename... Args>
bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const
bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlags flags, const Args &... args) const
{
if (user_was_dropped)
{
@ -573,8 +574,10 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
if (params.full_access)
return true;

auto access_granted = []
auto access_granted = [&]
{
if constexpr (throw_if_denied)
context->addQueryPrivilegesInfo(AccessRightsElement{flags, args...}.toStringWithoutOptions(), true);
return true;
};

@ -583,7 +586,10 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
FmtArgs && ...fmt_args [[maybe_unused]])
{
if constexpr (throw_if_denied)
{
context->addQueryPrivilegesInfo(AccessRightsElement{flags, args...}.toStringWithoutOptions(), false);
throw Exception(error_code, std::move(fmt_string), getUserName(), std::forward<FmtArgs>(fmt_args)...);
}
return false;
};

@ -686,102 +692,102 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
}

template <bool throw_if_denied, bool grant_option>
bool ContextAccess::checkAccessImpl(const AccessFlags & flags) const
bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessFlags & flags) const
{
return checkAccessImplHelper<throw_if_denied, grant_option>(flags);
return checkAccessImplHelper<throw_if_denied, grant_option>(context, flags);
}

template <bool throw_if_denied, bool grant_option, typename... Args>
bool ContextAccess::checkAccessImpl(const AccessFlags & flags, std::string_view database, const Args &... args) const
bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessFlags & flags, std::string_view database, const Args &... args) const
{
return checkAccessImplHelper<throw_if_denied, grant_option>(flags, database.empty() ? params.current_database : database, args...);
return checkAccessImplHelper<throw_if_denied, grant_option>(context, flags, database.empty() ? params.current_database : database, args...);
}

template <bool throw_if_denied, bool grant_option>
bool ContextAccess::checkAccessImplHelper(const AccessRightsElement & element) const
bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, const AccessRightsElement & element) const
{
assert(!element.grant_option || grant_option);
if (element.isGlobalWithParameter())
{
if (element.any_parameter)
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags);
else
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags, element.parameter);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags, element.parameter);
}
else if (element.any_database)
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags);
else if (element.any_table)
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags, element.database);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags, element.database);
else if (element.any_column)
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags, element.database, element.table);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags, element.database, element.table);
else
return checkAccessImpl<throw_if_denied, grant_option>(element.access_flags, element.database, element.table, element.columns);
return checkAccessImpl<throw_if_denied, grant_option>(context, element.access_flags, element.database, element.table, element.columns);
}

template <bool throw_if_denied, bool grant_option>
bool ContextAccess::checkAccessImpl(const AccessRightsElement & element) const
bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRightsElement & element) const
{
if constexpr (grant_option)
{
return checkAccessImplHelper<throw_if_denied, true>(element);
return checkAccessImplHelper<throw_if_denied, true>(context, element);
}
else
{
if (element.grant_option)
return checkAccessImplHelper<throw_if_denied, true>(element);
return checkAccessImplHelper<throw_if_denied, true>(context, element);
else
return checkAccessImplHelper<throw_if_denied, false>(element);
return checkAccessImplHelper<throw_if_denied, false>(context, element);
}
}

template <bool throw_if_denied, bool grant_option>
bool ContextAccess::checkAccessImpl(const AccessRightsElements & elements) const
bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRightsElements & elements) const
{
for (const auto & element : elements)
if (!checkAccessImpl<throw_if_denied, grant_option>(element))
if (!checkAccessImpl<throw_if_denied, grant_option>(context, element))
return false;
return true;
}

bool ContextAccess::isGranted(const AccessFlags & flags) const { return checkAccessImpl<false, false>(flags); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags) const { return checkAccessImpl<false, false>(context, flags); }
bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl<false, false>(flags, database); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { return checkAccessImpl<false, false>(context, flags, database); }
bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl<false, false>(flags, database, table); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl<false, false>(context, flags, database, table); }
bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl<false, false>(flags, database, table, column); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl<false, false>(context, flags, database, table, column); }
bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return checkAccessImpl<false, false>(flags, database, table, columns); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return checkAccessImpl<false, false>(context, flags, database, table, columns); }
bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl<false, false>(flags, database, table, columns); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl<false, false>(context, flags, database, table, columns); }
bool ContextAccess::isGranted(const AccessRightsElement & element) const { return checkAccessImpl<false, false>(element); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessRightsElement & element) const { return checkAccessImpl<false, false>(context, element); }
bool ContextAccess::isGranted(const AccessRightsElements & elements) const { return checkAccessImpl<false, false>(elements); }
bool ContextAccess::isGranted(const ContextPtr & context, const AccessRightsElements & elements) const { return checkAccessImpl<false, false>(context, elements); }

bool ContextAccess::hasGrantOption(const AccessFlags & flags) const { return checkAccessImpl<false, true>(flags); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags) const { return checkAccessImpl<false, true>(context, flags); }
bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl<false, true>(flags, database); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { return checkAccessImpl<false, true>(context, flags, database); }
bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl<false, true>(flags, database, table); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl<false, true>(context, flags, database, table); }
bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl<false, true>(flags, database, table, column); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl<false, true>(context, flags, database, table, column); }
bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return checkAccessImpl<false, true>(flags, database, table, columns); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return checkAccessImpl<false, true>(context, flags, database, table, columns); }
bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl<false, true>(flags, database, table, columns); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl<false, true>(context, flags, database, table, columns); }
bool ContextAccess::hasGrantOption(const AccessRightsElement & element) const { return checkAccessImpl<false, true>(element); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessRightsElement & element) const { return checkAccessImpl<false, true>(context, element); }
bool ContextAccess::hasGrantOption(const AccessRightsElements & elements) const { return checkAccessImpl<false, true>(elements); }
bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const { return checkAccessImpl<false, true>(context, elements); }

void ContextAccess::checkAccess(const AccessFlags & flags) const { checkAccessImpl<true, false>(flags); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags) const { checkAccessImpl<true, false>(context, flags); }
void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database) const { checkAccessImpl<true, false>(flags, database); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { checkAccessImpl<true, false>(context, flags, database); }
void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl<true, false>(flags, database, table); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl<true, false>(context, flags, database, table); }
void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl<true, false>(flags, database, table, column); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl<true, false>(context, flags, database, table, column); }
void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { checkAccessImpl<true, false>(flags, database, table, columns); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { checkAccessImpl<true, false>(context, flags, database, table, columns); }
void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl<true, false>(flags, database, table, columns); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl<true, false>(context, flags, database, table, columns); }
void ContextAccess::checkAccess(const AccessRightsElement & element) const { checkAccessImpl<true, false>(element); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessRightsElement & element) const { checkAccessImpl<true, false>(context, element); }
void ContextAccess::checkAccess(const AccessRightsElements & elements) const { checkAccessImpl<true, false>(elements); }
void ContextAccess::checkAccess(const ContextPtr & context, const AccessRightsElements & elements) const { checkAccessImpl<true, false>(context, elements); }

void ContextAccess::checkGrantOption(const AccessFlags & flags) const { checkAccessImpl<true, true>(flags); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags) const { checkAccessImpl<true, true>(context, flags); }
void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database) const { checkAccessImpl<true, true>(flags, database); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { checkAccessImpl<true, true>(context, flags, database); }
void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl<true, true>(flags, database, table); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl<true, true>(context, flags, database, table); }
void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl<true, true>(flags, database, table, column); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl<true, true>(context, flags, database, table, column); }
void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { checkAccessImpl<true, true>(flags, database, table, columns); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { checkAccessImpl<true, true>(context, flags, database, table, columns); }
void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl<true, true>(flags, database, table, columns); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl<true, true>(context, flags, database, table, columns); }
void ContextAccess::checkGrantOption(const AccessRightsElement & element) const { checkAccessImpl<true, true>(element); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessRightsElement & element) const { checkAccessImpl<true, true>(context, element); }
void ContextAccess::checkGrantOption(const AccessRightsElements & elements) const { checkAccessImpl<true, true>(elements); }
void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const { checkAccessImpl<true, true>(context, elements); }


template <bool throw_if_denied, typename Container, typename GetNameFunction>
bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const
bool ContextAccess::checkAdminOptionImplHelper(const ContextPtr & context, const Container & role_ids, const GetNameFunction & get_name_function) const
{
auto show_error = []<typename... FmtArgs>(int error_code [[maybe_unused]],
FormatStringHelper<FmtArgs...> fmt_string [[maybe_unused]],
@ -804,7 +810,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
if (!std::size(role_ids))
return true;

if (isGranted(AccessType::ROLE_ADMIN))
if (isGranted(context, AccessType::ROLE_ADMIN))
return true;

auto info = getRolesInfo();
@ -840,54 +846,54 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const UUID & role_id) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id) const
{
return checkAdminOptionImplHelper<throw_if_denied>(to_array(role_id), [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
return checkAdminOptionImplHelper<throw_if_denied>(context, to_array(role_id), [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const String & role_name) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const String & role_name) const
{
return checkAdminOptionImplHelper<throw_if_denied>(to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional<String>{role_name}; });
return checkAdminOptionImplHelper<throw_if_denied>(context, to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional<String>{role_name}; });
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const
{
return checkAdminOptionImplHelper<throw_if_denied>(to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
return checkAdminOptionImplHelper<throw_if_denied>(context, to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const std::vector<UUID> & role_ids) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids) const
{
return checkAdminOptionImplHelper<throw_if_denied>(role_ids, [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
return checkAdminOptionImplHelper<throw_if_denied>(context, role_ids, [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const
{
return checkAdminOptionImplHelper<throw_if_denied>(role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional<String>{names_of_roles[i]}; });
return checkAdminOptionImplHelper<throw_if_denied>(context, role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional<String>{names_of_roles[i]}; });
}

template <bool throw_if_denied>
bool ContextAccess::checkAdminOptionImpl(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const
bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const
{
return checkAdminOptionImplHelper<throw_if_denied>(role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
return checkAdminOptionImplHelper<throw_if_denied>(context, role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
}

bool ContextAccess::hasAdminOption(const UUID & role_id) const { return checkAdminOptionImpl<false>(role_id); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id) const { return checkAdminOptionImpl<false>(context, role_id); }
bool ContextAccess::hasAdminOption(const UUID & role_id, const String & role_name) const { return checkAdminOptionImpl<false>(role_id, role_name); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const { return checkAdminOptionImpl<false>(context, role_id, role_name); }
bool ContextAccess::hasAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(role_id, names_of_roles); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_id, names_of_roles); }
bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids) const { return checkAdminOptionImpl<false>(role_ids); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids) const { return checkAdminOptionImpl<false>(context, role_ids); }
bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { return checkAdminOptionImpl<false>(role_ids, names_of_roles); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_ids, names_of_roles); }
bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(role_ids, names_of_roles); }
bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_ids, names_of_roles); }

void ContextAccess::checkAdminOption(const UUID & role_id) const { checkAdminOptionImpl<true>(role_id); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id) const { checkAdminOptionImpl<true>(context, role_id); }
void ContextAccess::checkAdminOption(const UUID & role_id, const String & role_name) const { checkAdminOptionImpl<true>(role_id, role_name); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const { checkAdminOptionImpl<true>(context, role_id, role_name); }
void ContextAccess::checkAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(role_id, names_of_roles); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(context, role_id, names_of_roles); }
void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids) const { checkAdminOptionImpl<true>(role_ids); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids) const { checkAdminOptionImpl<true>(context, role_ids); }
void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { checkAdminOptionImpl<true>(role_ids, names_of_roles); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { checkAdminOptionImpl<true>(context, role_ids, names_of_roles); }
void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(role_ids, names_of_roles); }
void ContextAccess::checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(context, role_ids, names_of_roles); }


void ContextAccess::checkGranteeIsAllowed(const UUID & grantee_id, const IAccessEntity & grantee) const
@ -919,4 +925,10 @@ void ContextAccess::checkGranteesAreAllowed(const std::vector<UUID> & grantee_id
}
}

std::shared_ptr<const ContextAccessWrapper> ContextAccessWrapper::fromContext(const ContextPtr & context)
{
return context->getAccess();
}


}
@ -4,9 +4,12 @@
#include <Access/ContextAccessParams.h>
#include <Access/EnabledRowPolicies.h>
#include <Interpreters/ClientInfo.h>
#include <Access/QuotaUsage.h>
#include <Common/SettingsChanges.h>
#include <Core/UUID.h>
#include <base/scope_guard.h>
#include <boost/container/flat_set.hpp>
#include <memory>
#include <mutex>
#include <optional>
#include <unordered_map>
@ -71,59 +74,59 @@ public:

/// Checks if a specified access is granted, and throws an exception if not.
/// Empty database means the current database.
void checkAccess(const AccessFlags & flags) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags) const;
void checkAccess(const AccessFlags & flags, std::string_view database) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const;
void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const;
void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
void checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
void checkAccess(const AccessRightsElement & element) const;
void checkAccess(const ContextPtr & context, const AccessRightsElement & element) const;
void checkAccess(const AccessRightsElements & elements) const;
void checkAccess(const ContextPtr & context, const AccessRightsElements & elements) const;

void checkGrantOption(const AccessFlags & flags) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags) const;
void checkGrantOption(const AccessFlags & flags, std::string_view database) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const;
void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const;
void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
void checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
void checkGrantOption(const AccessRightsElement & element) const;
void checkGrantOption(const ContextPtr & context, const AccessRightsElement & element) const;
void checkGrantOption(const AccessRightsElements & elements) const;
void checkGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const;

/// Checks if a specified access is granted, and returns false if not.
/// Empty database means the current database.
bool isGranted(const AccessFlags & flags) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags) const;
bool isGranted(const AccessFlags & flags, std::string_view database) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const;
bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const;
bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
bool isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
|
||||||
bool isGranted(const AccessRightsElement & element) const;
|
bool isGranted(const ContextPtr & context, const AccessRightsElement & element) const;
|
||||||
bool isGranted(const AccessRightsElements & elements) const;
|
bool isGranted(const ContextPtr & context, const AccessRightsElements & elements) const;
|
||||||
|
|
||||||
bool hasGrantOption(const AccessFlags & flags) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags) const;
|
||||||
bool hasGrantOption(const AccessFlags & flags, std::string_view database) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const;
|
||||||
bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const;
|
||||||
bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const;
|
||||||
bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const;
|
||||||
bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const;
|
||||||
bool hasGrantOption(const AccessRightsElement & element) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessRightsElement & element) const;
|
||||||
bool hasGrantOption(const AccessRightsElements & elements) const;
|
bool hasGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const;
|
||||||
|
|
||||||
/// Checks if a specified role is granted with admin option, and throws an exception if not.
|
/// Checks if a specified role is granted with admin option, and throws an exception if not.
|
||||||
void checkAdminOption(const UUID & role_id) const;
|
void checkAdminOption(const ContextPtr & context, const UUID & role_id) const;
|
||||||
void checkAdminOption(const UUID & role_id, const String & role_name) const;
|
void checkAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const;
|
||||||
void checkAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
void checkAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
void checkAdminOption(const std::vector<UUID> & role_ids) const;
|
void checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids) const;
|
||||||
void checkAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
void checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
||||||
void checkAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
void checkAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
|
|
||||||
/// Checks if a specified role is granted with admin option, and returns false if not.
|
/// Checks if a specified role is granted with admin option, and returns false if not.
|
||||||
bool hasAdminOption(const UUID & role_id) const;
|
bool hasAdminOption(const ContextPtr & context, const UUID & role_id) const;
|
||||||
bool hasAdminOption(const UUID & role_id, const String & role_name) const;
|
bool hasAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const;
|
||||||
bool hasAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
bool hasAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
bool hasAdminOption(const std::vector<UUID> & role_ids) const;
|
bool hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids) const;
|
||||||
bool hasAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
bool hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
||||||
bool hasAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
bool hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
|
|
||||||
/// Checks if a grantee is allowed for the current user, throws an exception if not.
|
/// Checks if a grantee is allowed for the current user, throws an exception if not.
|
||||||
void checkGranteeIsAllowed(const UUID & grantee_id, const IAccessEntity & grantee) const;
|
void checkGranteeIsAllowed(const UUID & grantee_id, const IAccessEntity & grantee) const;
|
||||||
@ -142,43 +145,43 @@ private:
|
|||||||
void calculateAccessRights() const TSA_REQUIRES(mutex);
|
void calculateAccessRights() const TSA_REQUIRES(mutex);
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option>
|
template <bool throw_if_denied, bool grant_option>
|
||||||
bool checkAccessImpl(const AccessFlags & flags) const;
|
bool checkAccessImpl(const ContextPtr & context, const AccessFlags & flags) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option, typename... Args>
|
template <bool throw_if_denied, bool grant_option, typename... Args>
|
||||||
bool checkAccessImpl(const AccessFlags & flags, std::string_view database, const Args &... args) const;
|
bool checkAccessImpl(const ContextPtr & context, const AccessFlags & flags, std::string_view database, const Args &... args) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option>
|
template <bool throw_if_denied, bool grant_option>
|
||||||
bool checkAccessImpl(const AccessRightsElement & element) const;
|
bool checkAccessImpl(const ContextPtr & context, const AccessRightsElement & element) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option>
|
template <bool throw_if_denied, bool grant_option>
|
||||||
bool checkAccessImpl(const AccessRightsElements & elements) const;
|
bool checkAccessImpl(const ContextPtr & context, const AccessRightsElements & elements) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option, typename... Args>
|
template <bool throw_if_denied, bool grant_option, typename... Args>
|
||||||
bool checkAccessImplHelper(AccessFlags flags, const Args &... args) const;
|
bool checkAccessImplHelper(const ContextPtr & context, AccessFlags flags, const Args &... args) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, bool grant_option>
|
template <bool throw_if_denied, bool grant_option>
|
||||||
bool checkAccessImplHelper(const AccessRightsElement & element) const;
|
bool checkAccessImplHelper(const ContextPtr & context, const AccessRightsElement & element) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const UUID & role_id) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const UUID & role_id, const String & role_name) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const String & role_name) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const std::vector<UUID> & role_ids) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const;
|
||||||
|
|
||||||
template <bool throw_if_denied>
|
template <bool throw_if_denied>
|
||||||
bool checkAdminOptionImpl(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
bool checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const;
|
||||||
|
|
||||||
template <bool throw_if_denied, typename Container, typename GetNameFunction>
|
template <bool throw_if_denied, typename Container, typename GetNameFunction>
|
||||||
bool checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const;
|
bool checkAdminOptionImplHelper(const ContextPtr & context, const Container & role_ids, const GetNameFunction & get_name_function) const;
|
||||||
|
|
||||||
const AccessControl * access_control = nullptr;
|
const AccessControl * access_control = nullptr;
|
||||||
const Params params;
|
const Params params;
|
||||||
@ -203,4 +206,115 @@ private:
|
|||||||
mutable std::shared_ptr<const EnabledSettings> enabled_settings TSA_GUARDED_BY(mutex);
|
mutable std::shared_ptr<const EnabledSettings> enabled_settings TSA_GUARDED_BY(mutex);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// This wrapper was added to be able to pass the current context to the access
|
||||||
|
/// without the need to change the signature and all calls to the ContextAccess itself.
|
||||||
|
/// Right now a context is used to store privileges that are checked for a query,
|
||||||
|
/// and might be useful for something else in the future as well.
|
||||||
|
class ContextAccessWrapper : public std::enable_shared_from_this<ContextAccessWrapper>
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
using ContextAccessPtr = std::shared_ptr<const ContextAccess>;
|
||||||
|
|
||||||
|
ContextAccessWrapper(const ContextAccessPtr & access_, const ContextPtr & context_): access(access_), context(context_) {}
|
||||||
|
~ContextAccessWrapper() = default;
|
||||||
|
|
||||||
|
static std::shared_ptr<const ContextAccessWrapper> fromContext(const ContextPtr & context);
|
||||||
|
|
||||||
|
const ContextAccess::Params & getParams() const { return access->getParams(); }
|
||||||
|
|
||||||
|
const ContextAccessPtr & getAccess() const { return access; }
|
||||||
|
|
||||||
|
/// Returns the current user. Throws if user is nullptr.
|
||||||
|
ALWAYS_INLINE UserPtr getUser() const { return access->getUser(); }
|
||||||
|
/// Same as above, but can return nullptr.
|
||||||
|
ALWAYS_INLINE UserPtr tryGetUser() const { return access->tryGetUser(); }
|
||||||
|
ALWAYS_INLINE String getUserName() const { return access->getUserName(); }
|
||||||
|
ALWAYS_INLINE std::optional<UUID> getUserID() const { return access->getUserID(); }
|
||||||
|
|
||||||
|
/// Returns information about current and enabled roles.
|
||||||
|
ALWAYS_INLINE std::shared_ptr<const EnabledRolesInfo> getRolesInfo() const { return access->getRolesInfo(); }
|
||||||
|
|
||||||
|
/// Returns the row policy filter for a specified table.
|
||||||
|
/// The function returns nullptr if there is no filter to apply.
|
||||||
|
ALWAYS_INLINE RowPolicyFilterPtr getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const { return access->getRowPolicyFilter(database, table_name, filter_type); }
|
||||||
|
|
||||||
|
/// Returns the quota to track resource consumption.
|
||||||
|
ALWAYS_INLINE std::shared_ptr<const EnabledQuota> getQuota() const { return access->getQuota(); }
|
||||||
|
ALWAYS_INLINE std::optional<QuotaUsage> getQuotaUsage() const { return access->getQuotaUsage(); }
|
||||||
|
|
||||||
|
/// Returns the default settings, i.e. the settings which should be applied on user's login.
|
||||||
|
ALWAYS_INLINE SettingsChanges getDefaultSettings() const { return access->getDefaultSettings(); }
|
||||||
|
ALWAYS_INLINE std::shared_ptr<const SettingsProfilesInfo> getDefaultProfileInfo() const { return access->getDefaultProfileInfo(); }
|
||||||
|
|
||||||
|
/// Returns the current access rights.
|
||||||
|
ALWAYS_INLINE std::shared_ptr<const AccessRights> getAccessRights() const { return access->getAccessRights(); }
|
||||||
|
ALWAYS_INLINE std::shared_ptr<const AccessRights> getAccessRightsWithImplicit() const { return access->getAccessRightsWithImplicit(); }
|
||||||
|
|
||||||
|
/// Checks if a specified access is granted, and throws an exception if not.
|
||||||
|
/// Empty database means the current database.
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags) const { access->checkAccess(context, flags); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags, std::string_view database) const { access->checkAccess(context, flags, database); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const { access->checkAccess(context, flags, database, table); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { access->checkAccess(context, flags, database, table, column); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { access->checkAccess(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { access->checkAccess(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessRightsElement & element) const { access->checkAccess(context, element); }
|
||||||
|
ALWAYS_INLINE void checkAccess(const AccessRightsElements & elements) const { access->checkAccess(context, elements); }
|
||||||
|
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags) const { access->checkGrantOption(context, flags); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags, std::string_view database) const { access->checkGrantOption(context, flags, database); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { access->checkGrantOption(context, flags, database, table); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { access->checkGrantOption(context, flags, database, table, column); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { access->checkGrantOption(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { access->checkGrantOption(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessRightsElement & element) const { access->checkGrantOption(context, element); }
|
||||||
|
ALWAYS_INLINE void checkGrantOption(const AccessRightsElements & elements) const { access->checkGrantOption(context, elements); }
|
||||||
|
|
||||||
|
/// Checks if a specified access is granted, and returns false if not.
|
||||||
|
/// Empty database means the current database.
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags) const { return access->isGranted(context, flags); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags, std::string_view database) const { return access->isGranted(context, flags, database); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const { return access->isGranted(context, flags, database, table); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return access->isGranted(context, flags, database, table, column); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return access->isGranted(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return access->isGranted(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessRightsElement & element) const { return access->isGranted(context, element); }
|
||||||
|
ALWAYS_INLINE bool isGranted(const AccessRightsElements & elements) const { return access->isGranted(context, elements); }
|
||||||
|
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags) const { return access->hasGrantOption(context, flags); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags, std::string_view database) const { return access->hasGrantOption(context, flags, database); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { return access->hasGrantOption(context, flags, database, table); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return access->hasGrantOption(context, flags, database, table, column); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector<std::string_view> & columns) const { return access->hasGrantOption(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return access->hasGrantOption(context, flags, database, table, columns); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessRightsElement & element) const { return access->hasGrantOption(context, element); }
|
||||||
|
ALWAYS_INLINE bool hasGrantOption(const AccessRightsElements & elements) const { return access->hasGrantOption(context, elements); }
|
||||||
|
|
||||||
|
/// Checks if a specified role is granted with admin option, and throws an exception if not.
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const UUID & role_id) const { access->checkAdminOption(context, role_id); }
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const UUID & role_id, const String & role_name) const { access->checkAdminOption(context, role_id, role_name); }
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { access->checkAdminOption(context, role_id, names_of_roles); }
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const std::vector<UUID> & role_ids) const { access->checkAdminOption(context, role_ids); }
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { access->checkAdminOption(context, role_ids, names_of_roles); }
|
||||||
|
ALWAYS_INLINE void checkAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { access->checkAdminOption(context, role_ids, names_of_roles); }
|
||||||
|
|
||||||
|
/// Checks if a specified role is granted with admin option, and returns false if not.
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const UUID & role_id) const { return access->hasAdminOption(context, role_id); }
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const UUID & role_id, const String & role_name) const { return access->hasAdminOption(context, role_id, role_name); }
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { return access->hasAdminOption(context, role_id, names_of_roles); }
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const std::vector<UUID> & role_ids) const { return access->hasAdminOption(context, role_ids); }
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { return access->hasAdminOption(context, role_ids, names_of_roles); }
|
||||||
|
ALWAYS_INLINE bool hasAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { return access->hasAdminOption(context, role_ids, names_of_roles); }
|
||||||
|
|
||||||
|
/// Checks if a grantee is allowed for the current user, throws an exception if not.
|
||||||
|
ALWAYS_INLINE void checkGranteeIsAllowed(const UUID & grantee_id, const IAccessEntity & grantee) const { access->checkGranteeIsAllowed(grantee_id, grantee); }
|
||||||
|
/// Checks if grantees are allowed for the current user, throws an exception if not.
|
||||||
|
ALWAYS_INLINE void checkGranteesAreAllowed(const std::vector<UUID> & grantee_ids) const { access->checkGranteesAreAllowed(grantee_ids); }
|
||||||
|
|
||||||
|
private:
|
||||||
|
ContextAccessPtr access;
|
||||||
|
ContextPtr context;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
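The wrapper above exists so the query context can travel with the access-checking object instead of being threaded through every call site by hand. Below is a minimal standalone sketch of that pattern, assuming nothing beyond the standard library; `Context`, `Access`, and `AccessWrapper` are simplified stand-ins for illustration, not the real ClickHouse classes:

```cpp
// Sketch of the "capture the context once, forward it on every call" pattern.
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Context
{
    std::string current_database;
};
using ContextPtr = std::shared_ptr<const Context>;

struct Access // stand-in for ContextAccess: every check needs the context
{
    bool isGranted(const ContextPtr & context, const std::string & table) const
    {
        std::cout << "check " << context->current_database << "." << table << "\n";
        return true;
    }
};

class AccessWrapper // stand-in for ContextAccessWrapper
{
public:
    AccessWrapper(std::shared_ptr<const Access> access_, ContextPtr context_)
        : access(std::move(access_)), context(std::move(context_)) {}

    // Old call sites keep their one-argument form; the context is supplied here.
    bool isGranted(const std::string & table) const { return access->isGranted(context, table); }

private:
    std::shared_ptr<const Access> access;
    ContextPtr context;
};

int main()
{
    ContextPtr ctx = std::make_shared<Context>(Context{"default"});
    AccessWrapper wrapper(std::make_shared<Access>(), ctx);
    return wrapper.isGranted("events") ? 0 : 1;
}
```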
@@ -15,22 +15,8 @@ namespace ErrorCodes

 bool operator==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs)
 {
-    if (lhs.settings != rhs.settings)
-        return false;
-
-    if (lhs.constraints != rhs.constraints)
-        return false;
-
-    if (lhs.profiles != rhs.profiles)
-        return false;
-
-    if (lhs.profiles_with_implicit != rhs.profiles_with_implicit)
-        return false;
-
-    if (lhs.names_of_profiles != rhs.names_of_profiles)
-        return false;
-
-    return true;
+    return std::tie(lhs.settings, lhs.constraints, lhs.profiles, lhs.profiles_with_implicit, lhs.names_of_profiles)
+        == std::tie(rhs.settings, rhs.constraints, rhs.profiles, rhs.profiles_with_implicit, rhs.names_of_profiles);
 }

 std::shared_ptr<const SettingsConstraintsAndProfileIDs>
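The rewritten operator== relies on std::tie to compare all members in one expression instead of a ladder of if/return pairs. A minimal self-contained example of the idiom, independent of ClickHouse types:

```cpp
// std::tie builds a tuple of references; tuple::operator== compares memberwise.
#include <cassert>
#include <string>
#include <tuple>
#include <vector>

struct ProfileInfo
{
    std::vector<int> settings;
    std::string constraints;
    std::vector<int> profiles;
};

bool operator==(const ProfileInfo & lhs, const ProfileInfo & rhs)
{
    return std::tie(lhs.settings, lhs.constraints, lhs.profiles)
        == std::tie(rhs.settings, rhs.constraints, rhs.profiles);
}

int main()
{
    ProfileInfo a{{1, 2}, "readonly", {7}};
    ProfileInfo b = a;
    assert(a == b);
    b.constraints = "writable";
    assert(!(a == b)); // one differing member is enough
}
```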
@@ -66,18 +52,20 @@ Strings SettingsProfilesInfo::getProfileNames() const
 {
     Strings result;
     result.reserve(profiles.size());
-    for (const auto & profile_id : profiles)
+    for (const UUID & profile_uuid : profiles)
     {
-        const auto p = names_of_profiles.find(profile_id);
-        if (p != names_of_profiles.end())
-            result.push_back(p->second);
+        const auto names_it = names_of_profiles.find(profile_uuid);
+        if (names_it != names_of_profiles.end())
+        {
+            result.push_back(names_it->second);
+        }
         else
         {
-            if (const auto name = access_control.tryReadName(profile_id))
+            if (const auto name = access_control.tryReadName(profile_uuid))
                 // We could've updated cache here, but it is a very rare case, so don't bother.
                 result.push_back(*name);
             else
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to get profile name for {}", toString(profile_id));
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to get profile name for {}", toString(profile_uuid));
         }
     }

@@ -29,7 +29,11 @@ struct SettingsProfilesInfo
     /// Names of all the profiles in `profiles`.
     std::unordered_map<UUID, String> names_of_profiles;

-    explicit SettingsProfilesInfo(const AccessControl & access_control_) : constraints(access_control_), access_control(access_control_) {}
+    explicit SettingsProfilesInfo(const AccessControl & access_control_)
+        : constraints(access_control_), access_control(access_control_)
+    {
+    }

     std::shared_ptr<const SettingsConstraintsAndProfileIDs> getConstraintsAndProfileIDs(
         const std::shared_ptr<const SettingsConstraintsAndProfileIDs> & previous = nullptr) const;

@@ -228,6 +228,11 @@ public:
         return prefix_size + nested_func->sizeOfData();
     }

+    size_t alignOfData() const override
+    {
+        return std::max(alignof(Data), nested_func->alignOfData());
+    }
+
     void create(AggregateDataPtr __restrict place) const override
     {
         new (place) Data;
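The new alignOfData() override matters because the combinator's state is laid out as its own prefix followed by the nested function's state within one allocation, so the allocation must honour the stricter of the two alignments. A standalone sketch of the arithmetic, with placeholder state types (`PrefixData` and `NestedData` are illustrative only):

```cpp
// Why the maximum alignment is taken: the nested state must start at an offset
// that satisfies its own alignment inside a buffer aligned for both parts.
#include <algorithm>
#include <cstddef>
#include <cstdio>

struct PrefixData { unsigned char flag; };          // the combinator's own state
struct NestedData { alignas(16) double sums[2]; };  // imagine the nested function's state

int main()
{
    size_t prefix_size = sizeof(PrefixData);
    size_t align = std::max(alignof(PrefixData), alignof(NestedData));
    // Round the prefix up so the nested state begins at a properly aligned offset.
    size_t nested_offset = (prefix_size + align - 1) / align * align;
    std::printf("alignment=%zu nested_offset=%zu\n", align, nested_offset);
}
```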
@@ -985,18 +985,18 @@ std::string QueryAnalyzer::rewriteAggregateFunctionNameIfNeeded(
     {
         result_aggregate_function_name = settings.count_distinct_implementation;
     }
-    else if (aggregate_function_name_lowercase == "countdistinctif" || aggregate_function_name_lowercase == "countifdistinct")
+    else if (aggregate_function_name_lowercase == "countifdistinct" ||
+        (settings.rewrite_count_distinct_if_with_count_distinct_implementation && aggregate_function_name_lowercase == "countdistinctif"))
     {
         result_aggregate_function_name = settings.count_distinct_implementation;
         result_aggregate_function_name += "If";
     }
-
-    /// Replace aggregateFunctionIfDistinct into aggregateFunctionDistinctIf to make execution more optimal
-    if (result_aggregate_function_name.ends_with("ifdistinct"))
+    else if (aggregate_function_name_lowercase.ends_with("ifdistinct"))
     {
+        /// Replace aggregateFunctionIfDistinct into aggregateFunctionDistinctIf to make execution more optimal
         size_t prefix_length = result_aggregate_function_name.size() - strlen("ifdistinct");
         result_aggregate_function_name = result_aggregate_function_name.substr(0, prefix_length) + "DistinctIf";
     }

     bool need_add_or_null = settings.aggregate_functions_null_for_empty && !result_aggregate_function_name.ends_with("OrNull");
     if (need_add_or_null)
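The last branch turns a trailing `IfDistinct` into `DistinctIf` by trimming the suffix and re-appending it in the order the engine executes more efficiently. A small self-contained demonstration of that string manipulation (the `rewriteIfDistinct` helper is hypothetical, written only to show the ends_with/substr logic; std::string::ends_with requires C++20):

```cpp
#include <cassert>
#include <cctype>
#include <cstring>
#include <string>

std::string rewriteIfDistinct(const std::string & name)
{
    // Match case-insensitively, the way the analyzer's lowercase check does.
    std::string lowercase;
    for (char c : name)
        lowercase += static_cast<char>(std::tolower(static_cast<unsigned char>(c)));

    if (!lowercase.ends_with("ifdistinct"))
        return name;

    size_t prefix_length = name.size() - std::strlen("ifdistinct");
    return name.substr(0, prefix_length) + "DistinctIf";
}

int main()
{
    assert(rewriteIfDistinct("sumIfDistinct") == "sumDistinctIf");
    assert(rewriteIfDistinct("count") == "count"); // untouched
}
```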
@@ -9,6 +9,8 @@
 #include <Interpreters/convertFieldToType.h>
 #include <Interpreters/Set.h>

+#include <Common/assert_cast.h>
+
 namespace DB
 {

@@ -54,8 +56,9 @@ size_t getCompoundTypeDepth(const IDataType & type)
 }

 template <typename Collection>
-Block createBlockFromCollection(const Collection & collection, const DataTypes & block_types, bool transform_null_in)
+Block createBlockFromCollection(const Collection & collection, const DataTypes & value_types, const DataTypes & block_types, bool transform_null_in)
 {
+    assert(collection.size() == value_types.size());
     size_t columns_size = block_types.size();
     MutableColumns columns(columns_size);
     for (size_t i = 0; i < columns_size; ++i)
@@ -66,13 +69,17 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes &

     Row tuple_values;

-    for (const auto & value : collection)
+    for (size_t collection_index = 0; collection_index < collection.size(); ++collection_index)
     {
+        const auto & value = collection[collection_index];
         if (columns_size == 1)
         {
-            auto field = convertFieldToTypeStrict(value, *block_types[0]);
+            const DataTypePtr & data_type = value_types[collection_index];
+            auto field = convertFieldToTypeStrict(value, *data_type, *block_types[0]);
             if (!field)
+            {
                 continue;
+            }

             bool need_insert_null = transform_null_in && block_types[0]->isNullable();
             if (!field->isNull() || need_insert_null)
@@ -87,6 +94,9 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes &
                 value.getTypeName());

         const auto & tuple = value.template get<const Tuple &>();
+        const DataTypePtr & value_type = value_types[collection_index];
+        const DataTypes & tuple_value_type = typeid_cast<const DataTypeTuple *>(value_type.get())->getElements();

         size_t tuple_size = tuple.size();

         if (tuple_size != columns_size)
@@ -101,7 +111,7 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes &
         size_t i = 0;
         for (; i < tuple_size; ++i)
         {
-            auto converted_field = convertFieldToTypeStrict(tuple[i], *block_types[i]);
+            auto converted_field = convertFieldToTypeStrict(tuple[i], *tuple_value_type[i], *block_types[i]);
             if (!converted_field)
                 break;
             tuple_values[i] = std::move(*converted_field);
@@ -147,20 +157,28 @@ Block getSetElementsForConstantValue(const DataTypePtr & expression_type, const
     if (lhs_type_depth == rhs_type_depth)
     {
         /// 1 in 1; (1, 2) in (1, 2); identity(tuple(tuple(tuple(1)))) in tuple(tuple(tuple(1))); etc.

         Array array{value};
-        result_block = createBlockFromCollection(array, set_element_types, transform_null_in);
+        DataTypes value_types{value_type};
+        result_block = createBlockFromCollection(array, value_types, set_element_types, transform_null_in);
     }
     else if (lhs_type_depth + 1 == rhs_type_depth)
     {
         /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4))

         WhichDataType rhs_which_type(value_type);

         if (rhs_which_type.isArray())
-            result_block = createBlockFromCollection(value.get<const Array &>(), set_element_types, transform_null_in);
+        {
+            const DataTypeArray * value_array_type = assert_cast<const DataTypeArray *>(value_type.get());
+            size_t value_array_size = value.get<const Array &>().size();
+            DataTypes value_types(value_array_size, value_array_type->getNestedType());
+            result_block = createBlockFromCollection(value.get<const Array &>(), value_types, set_element_types, transform_null_in);
+        }
         else if (rhs_which_type.isTuple())
-            result_block = createBlockFromCollection(value.get<const Tuple &>(), set_element_types, transform_null_in);
+        {
+            const DataTypeTuple * value_tuple_type = assert_cast<const DataTypeTuple *>(value_type.get());
+            const DataTypes & value_types = value_tuple_type->getElements();
+            result_block = createBlockFromCollection(value.get<const Tuple &>(), value_types, set_element_types, transform_null_in);
+        }
         else
             throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
                 "Unsupported type at the right-side of IN. Expected Array or Tuple. Actual {}",
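Passing the source value types into createBlockFromCollection lets each IN-set literal be converted with knowledge of the type it actually carries, and elements that cannot be converted exactly are skipped rather than mangled. The sketch below illustrates the general contract of such a strict conversion on plain integers; `convertToInt8Strict` is an illustrative stand-in, not ClickHouse's convertFieldToTypeStrict, which operates on Field and IDataType:

```cpp
// Strict conversion: return std::nullopt when the value would not survive the
// round trip, so the caller can drop that element of the IN-set.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

std::optional<int8_t> convertToInt8Strict(int64_t value)
{
    int8_t narrowed = static_cast<int8_t>(value);
    if (static_cast<int64_t>(narrowed) != value)
        return std::nullopt; // information would be lost: skip it
    return narrowed;
}

int main()
{
    std::vector<int64_t> in_set{1, 2, 300}; // e.g. `x IN (1, 2, 300)` with x : Int8
    std::vector<int8_t> usable;
    for (int64_t v : in_set)
        if (auto converted = convertToInt8Strict(v))
            usable.push_back(*converted); // 300 is dropped: no Int8 can equal it
    std::cout << "kept " << usable.size() << " of " << in_set.size() << "\n";
}
```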
@@ -44,13 +44,12 @@
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/ASTColumnDeclaration.h>
 #include <Parsers/ASTFunction.h>
-#include <Parsers/Kusto/ParserKQLStatement.h>
 #include <Parsers/PRQL/ParserPRQLQuery.h>
+#include <Parsers/Kusto/ParserKQLStatement.h>
 #include <Parsers/Kusto/parseKQLQuery.h>

 #include <Processors/Formats/Impl/NullFormat.h>
 #include <Processors/Formats/IInputFormat.h>
-#include <Processors/Formats/IOutputFormat.h>
 #include <Processors/QueryPlan/QueryPlan.h>
 #include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
 #include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
@@ -256,6 +256,17 @@ void HedgedConnections::sendCancel()
     if (!sent_query || cancelled)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot cancel. Either no query sent or already cancelled.");

+    /// All hedged connections should be stopped, since otherwise before the
+    /// HedgedConnectionsFactory will be destroyed (that will happen from
+    /// QueryPipeline dtor) they could still do some work.
+    /// And not only this does not make sense, but it also could lead to
+    /// use-after-free of the current_thread, since the thread from which they
+    /// had been created differs from the thread where the dtor of
+    /// QueryPipeline will be called and the initial thread could be already
+    /// destroyed (especially when the system is under pressure).
+    if (hedged_connections_factory.hasEventsInProcess())
+        hedged_connections_factory.stopChoosingReplicas();
+
     cancelled = true;

     for (auto & offset_status : offset_states)
@@ -602,6 +602,8 @@
     M(721, DEPRECATED_FUNCTION) \
     M(722, ASYNC_LOAD_WAIT_FAILED) \
     M(723, PARQUET_EXCEPTION) \
+    M(724, TOO_MANY_TABLES) \
+    M(725, TOO_MANY_DATABASES) \
     \
     M(900, DISTRIBUTED_CACHE_ERROR) \
     M(901, CANNOT_USE_DISTRIBUTED_CACHE) \
src/Common/HilbertUtils.h (new file, 161 lines)
@@ -0,0 +1,161 @@
+#pragma once
+
+#include <Core/Types.h>
+#include <Common/BitHelpers.h>
+#include "base/types.h"
+#include <Functions/hilbertDecode2DLUT.h>
+#include <base/defines.h>
+#include <array>
+#include <set>
+
+
+namespace HilbertDetails
+{
+
+struct Segment // represents [begin; end], all bounds are included
+{
+    UInt64 begin;
+    UInt64 end;
+};
+
+}
+
+/*
+    Given a range of values of the Hilbert code, this function returns segments of the Hilbert curve
+    such that each of them lies in a whole domain (aka square)
+         0                 1
+    ┌────────────────────────────────┐
+    │               │                │
+    │               │                │
+  0 │    00xxx      │     11xxx      │
+    │      |        │       |        │
+    │      |        │       |        │
+    │______|________│_______|________│
+    │      |        │       |        │
+    │      |        │       |        │
+    │      |        │       |        │
+  1 │    01xxx______│_____10xxx      │
+    │               │                │
+    │               │                │
+    └────────────────────────────────┘
+    Imagine a square, one side of which is a x-axis, other is a y-axis.
+    First approximation of the Hilbert curve is on the picture - U curve.
+    So we divide Hilbert Code Interval on 4 parts each of which is represented by a square
+    and look where the given interval [start, finish] is located:
+    [00xxxxxx | 01xxxxxx | 10xxxxxx | 11xxxxxx ]
+    1:         [                               ]
+        start = 0010111  end = 10111110
+    2:         [       ]               [       ]
+    If it contains a whole sector (that represents a domain=square),
+    then we take this range. In the example above - it is a sector [01000000, 01111111]
+    Then we dig into the recursion and check the remaining ranges.
+    Note that after the first call all other ranges in the recursion will have either start or finish on the end of a range,
+    so the complexity of the algorithm will be O(logN), where N is the maximum of hilbert code.
+*/
+template <typename F>
+void segmentBinaryPartition(UInt64 start, UInt64 finish, UInt8 current_bits, F && callback)
+{
+    if (current_bits == 0)
+        return;
+
+    const auto next_bits = current_bits - 2;
+    const auto history = current_bits == 64 ? 0 : (start >> current_bits) << current_bits;
+
+    const auto chunk_mask = 0b11;
+    const auto start_chunk = (start >> next_bits) & chunk_mask;
+    const auto finish_chunk = (finish >> next_bits) & chunk_mask;
+
+    auto construct_range = [next_bits, history](UInt64 chunk)
+    {
+        return HilbertDetails::Segment{
+            .begin = history + (chunk << next_bits),
+            .end = history + ((chunk + 1) << next_bits) - 1
+        };
+    };
+
+    if (start_chunk == finish_chunk)
+    {
+        if ((finish - start + 1) == (1 << next_bits)) // it means that [begin, end] is a range
+        {
+            callback(HilbertDetails::Segment{.begin = start, .end = finish});
+            return;
+        }
+        segmentBinaryPartition(start, finish, next_bits, callback);
+        return;
+    }
+
+    for (auto range_chunk = start_chunk + 1; range_chunk < finish_chunk; ++range_chunk)
+    {
+        callback(construct_range(range_chunk));
+    }
+
+    const auto start_range = construct_range(start_chunk);
+    if (start == start_range.begin)
+    {
+        callback(start_range);
+    }
+    else
+    {
+        segmentBinaryPartition(start, start_range.end, next_bits, callback);
+    }
+
+    const auto finish_range = construct_range(finish_chunk);
+    if (finish == finish_range.end)
+    {
+        callback(finish_range);
+    }
+    else
+    {
+        segmentBinaryPartition(finish_range.begin, finish, next_bits, callback);
+    }
+}
+
+// Given 2 points representing ends of the range of Hilbert Curve that lies in a whole domain.
+// They are neighbour corners of some square - and the function returns ranges of both sides of this square
+inline std::array<std::pair<UInt64, UInt64>, 2> createRangeFromCorners(UInt64 x1, UInt64 y1, UInt64 x2, UInt64 y2)
+{
+    UInt64 dist_x = x1 > x2 ? x1 - x2 : x2 - x1;
+    UInt64 dist_y = y1 > y2 ? y1 - y2 : y2 - y1;
+    UInt64 range_size = std::max(dist_x, dist_y);
+    bool contains_minimum_vertice = x1 % (range_size + 1) == 0;
+    if (contains_minimum_vertice)
+    {
+        UInt64 x_min = std::min(x1, x2);
+        UInt64 y_min = std::min(y1, y2);
+        return {
+            std::pair<UInt64, UInt64>{x_min, x_min + range_size},
+            std::pair<UInt64, UInt64>{y_min, y_min + range_size}
+        };
+    }
+    else
+    {
+        UInt64 x_max = std::max(x1, x2);
+        UInt64 y_max = std::max(y1, y2);
+        chassert(x_max >= range_size);
+        chassert(y_max >= range_size);
+        return {
+            std::pair<UInt64, UInt64>{x_max - range_size, x_max},
+            std::pair<UInt64, UInt64>{y_max - range_size, y_max}
+        };
+    }
+}
+
+/** Unpack an interval of Hilbert curve to hyperrectangles covered by it across N dimensions.
+  */
+template <typename F>
+void hilbertIntervalToHyperrectangles2D(UInt64 first, UInt64 last, F && callback)
+{
+    const auto equal_bits_count = getLeadingZeroBits(last | first);
+    const auto even_equal_bits_count = equal_bits_count - equal_bits_count % 2;
+    segmentBinaryPartition(first, last, 64 - even_equal_bits_count, [&](HilbertDetails::Segment range)
+    {
+        auto interval1 = DB::FunctionHilbertDecode2DWIthLookupTableImpl<3>::decode(range.begin);
+        auto interval2 = DB::FunctionHilbertDecode2DWIthLookupTableImpl<3>::decode(range.end);
+
+        std::array<std::pair<UInt64, UInt64>, 2> unpacked_range = createRangeFromCorners(
+            std::get<0>(interval1), std::get<1>(interval1),
+            std::get<0>(interval2), std::get<1>(interval2));
+
+        callback(unpacked_range);
+    });
+}
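hilbertIntervalToHyperrectangles2D depends on decoding a curve position back into (x, y); the new header delegates that to a lookup-table implementation (hilbertDecode2DLUT). For reference only, the classic bit-by-bit decode below (the textbook algorithm for a curve filling an n-by-n grid, n a power of two) computes the same mapping and can be used to sanity-check small cases; it is not the ClickHouse implementation:

```cpp
#include <cstdint>
#include <cstdio>

static void rotate(uint64_t n, uint64_t & x, uint64_t & y, uint64_t rx, uint64_t ry)
{
    if (ry == 0)
    {
        if (rx == 1)
        {
            x = n - 1 - x;
            y = n - 1 - y;
        }
        uint64_t t = x; // transpose the quadrant
        x = y;
        y = t;
    }
}

// Map a distance d along the curve to coordinates in an n-by-n grid.
static void hilbertDecode(uint64_t n, uint64_t d, uint64_t & x, uint64_t & y)
{
    x = y = 0;
    for (uint64_t s = 1, t = d; s < n; s *= 2)
    {
        uint64_t rx = 1 & (t / 2);
        uint64_t ry = 1 & (t ^ rx);
        rotate(s, x, y, rx, ry);
        x += s * rx;
        y += s * ry;
        t /= 4;
    }
}

int main()
{
    for (uint64_t d = 0; d < 16; ++d) // walk the 4x4 curve
    {
        uint64_t x, y;
        hilbertDecode(4, d, x, y);
        std::printf("d=%2llu -> (%llu, %llu)\n",
                    (unsigned long long) d, (unsigned long long) x, (unsigned long long) y);
    }
}
```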
@ -11,10 +11,10 @@
|
|||||||
#include <Poco/Util/XMLConfiguration.h>
|
#include <Poco/Util/XMLConfiguration.h>
|
||||||
|
|
||||||
#include <boost/noncopyable.hpp>
|
#include <boost/noncopyable.hpp>
|
||||||
|
#include <boost/intrusive/list.hpp>
|
||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <deque>
|
#include <deque>
|
||||||
#include <queue>
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <functional>
|
#include <functional>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
@ -30,6 +30,8 @@ namespace ErrorCodes
|
|||||||
}
|
}
|
||||||
|
|
||||||
class ISchedulerNode;
|
class ISchedulerNode;
|
||||||
|
class EventQueue;
|
||||||
|
using EventId = UInt64;
|
||||||
|
|
||||||
inline const Poco::Util::AbstractConfiguration & emptyConfig()
|
inline const Poco::Util::AbstractConfiguration & emptyConfig()
|
||||||
{
|
{
|
||||||
@ -82,6 +84,115 @@ struct SchedulerNodeInfo
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Node of hierarchy for scheduling requests for resource. Base class for all
|
||||||
|
* kinds of scheduling elements (queues, policies, constraints and schedulers).
|
||||||
|
*
|
||||||
|
* Root node is a scheduler, which has it's thread to dequeue requests,
|
||||||
|
* execute requests (see ResourceRequest) and process events in a thread-safe manner.
|
||||||
|
* Immediate children of the scheduler represent independent resources.
|
||||||
|
* Each resource has it's own hierarchy to achieve required scheduling policies.
|
||||||
|
* Non-leaf nodes do not hold requests, but keep scheduling state
|
||||||
|
* (e.g. consumption history, amount of in-flight requests, etc).
|
||||||
|
* Leafs of hierarchy are queues capable of holding pending requests.
|
||||||
|
*
|
||||||
|
* scheduler (SchedulerRoot)
|
||||||
|
* / \
|
||||||
|
* constraint constraint (SemaphoreConstraint)
|
||||||
|
* | |
|
||||||
|
* policy policy (PriorityPolicy)
|
||||||
|
* / \ / \
|
||||||
|
* q1 q2 q3 q4 (FifoQueue)
|
||||||
|
*
|
||||||
|
* Dequeueing request from an inner node will dequeue request from one of active leaf-queues in its subtree.
|
||||||
|
* Node is considered to be active iff:
|
||||||
|
* - it has at least one pending request in one of leaves of it's subtree;
|
||||||
|
* - and enforced constraints, if any, are satisfied
|
||||||
|
* (e.g. amount of concurrent requests is not greater than some number).
|
||||||
|
*
|
||||||
|
* All methods must be called only from scheduler thread for thread-safety.
|
||||||
|
*/
|
||||||
|
class ISchedulerNode : public boost::intrusive::list_base_hook<>, private boost::noncopyable
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit ISchedulerNode(EventQueue * event_queue_, const Poco::Util::AbstractConfiguration & config = emptyConfig(), const String & config_prefix = {})
|
||||||
|
: event_queue(event_queue_)
|
||||||
|
, info(config, config_prefix)
|
||||||
|
{}
|
||||||
|
|
||||||
|
virtual ~ISchedulerNode() = default;
|
||||||
|
|
||||||
|
/// Checks if two nodes configuration is equal
|
||||||
|
virtual bool equals(ISchedulerNode * other)
|
||||||
|
{
|
||||||
|
return info.equals(other->info);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attach new child
|
||||||
|
virtual void attachChild(const std::shared_ptr<ISchedulerNode> & child) = 0;
|
||||||
|
|
||||||
|
/// Detach and destroy child
|
||||||
|
virtual void removeChild(ISchedulerNode * child) = 0;
|
||||||
|
|
||||||
|
/// Get attached child by name
|
||||||
|
virtual ISchedulerNode * getChild(const String & child_name) = 0;
|
||||||
|
|
||||||
|
/// Activation of child due to the first pending request
|
||||||
|
/// Should be called on leaf node (i.e. queue) to propagate activation signal through chain to the root
|
||||||
|
virtual void activateChild(ISchedulerNode * child) = 0;
|
||||||
|
|
||||||
|
/// Returns true iff node is active
|
||||||
|
virtual bool isActive() = 0;
|
||||||
|
|
||||||
|
/// Returns number of active children
|
||||||
|
virtual size_t activeChildren() = 0;
|
||||||
|
|
||||||
|
/// Returns the first request to be executed as the first component of resulting pair.
|
||||||
|
/// The second pair component is `true` iff node is still active after dequeueing.
|
||||||
|
virtual std::pair<ResourceRequest *, bool> dequeueRequest() = 0;
|
||||||
|
|
||||||
|
/// Returns full path string using names of every parent
|
||||||
|
String getPath()
|
||||||
|
{
|
||||||
|
String result;
|
||||||
|
ISchedulerNode * ptr = this;
|
||||||
|
while (ptr->parent)
|
||||||
|
{
|
||||||
|
result = "/" + ptr->basename + result;
|
||||||
|
ptr = ptr->parent;
|
||||||
|
}
|
||||||
|
return result.empty() ? "/" : result;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attach to a parent (used by attachChild)
|
||||||
|
virtual void setParent(ISchedulerNode * parent_)
|
||||||
|
{
|
||||||
|
parent = parent_;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected:
|
||||||
|
/// Notify parents about the first pending request or constraint becoming satisfied.
|
||||||
|
/// Postponed to be handled in scheduler thread, so it is intended to be called from outside.
|
||||||
|
void scheduleActivation();
|
||||||
|
|
||||||
|
public:
|
||||||
|
EventQueue * const event_queue;
|
||||||
|
String basename;
|
||||||
|
SchedulerNodeInfo info;
|
||||||
|
ISchedulerNode * parent = nullptr;
|
||||||
|
EventId activation_event_id = 0; // Valid for `ISchedulerNode` placed in EventQueue::activations
|
||||||
|
|
||||||
|
/// Introspection
|
||||||
|
std::atomic<UInt64> dequeued_requests{0};
|
||||||
|
std::atomic<UInt64> canceled_requests{0};
|
||||||
|
std::atomic<ResourceCost> dequeued_cost{0};
|
||||||
|
std::atomic<ResourceCost> canceled_cost{0};
|
||||||
|
std::atomic<UInt64> busy_periods{0};
|
||||||
|
};
|
||||||
|
|
||||||
|
using SchedulerNodePtr = std::shared_ptr<ISchedulerNode>;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Simple waitable thread-safe FIFO task queue.
|
* Simple waitable thread-safe FIFO task queue.
|
||||||
* Intended to hold postponed events for later handling (usually by scheduler thread).
|
* Intended to hold postponed events for later handling (usually by scheduler thread).
|
||||||
@ -89,57 +200,70 @@ struct SchedulerNodeInfo
|
|||||||
 class EventQueue
 {
 public:
-    using Event = std::function<void()>;
+    using Task = std::function<void()>;
+
+    static constexpr EventId not_postponed = 0;

     using TimePoint = std::chrono::system_clock::time_point;
     using Duration = std::chrono::system_clock::duration;
-    static constexpr UInt64 not_postponed = 0;
+
+    struct Event
+    {
+        const EventId event_id;
+        Task task;
+
+        Event(EventId event_id_, Task && task_)
+            : event_id(event_id_)
+            , task(std::move(task_))
+        {}
+    };

     struct Postponed
     {
         TimePoint key;
-        UInt64 id; // for canceling
-        std::unique_ptr<Event> event;
+        EventId event_id; // for canceling
+        std::unique_ptr<Task> task;

-        Postponed(TimePoint key_, UInt64 id_, Event && event_)
+        Postponed(TimePoint key_, EventId event_id_, Task && task_)
             : key(key_)
-            , id(id_)
-            , event(std::make_unique<Event>(std::move(event_)))
+            , event_id(event_id_)
+            , task(std::make_unique<Task>(std::move(task_)))
         {}

         bool operator<(const Postponed & rhs) const
         {
-            return std::tie(key, id) > std::tie(rhs.key, rhs.id); // reversed for min-heap
+            return std::tie(key, event_id) > std::tie(rhs.key, rhs.event_id); // reversed for min-heap
         }
     };

     /// Add an `event` to be processed after `until` time point.
-    /// Returns a unique id for canceling.
-    [[nodiscard]] UInt64 postpone(TimePoint until, Event && event)
+    /// Returns a unique event id for canceling.
+    [[nodiscard]] EventId postpone(TimePoint until, Task && task)
     {
         std::unique_lock lock{mutex};
         if (postponed.empty() || until < postponed.front().key)
             pending.notify_one();
-        auto id = ++last_id;
-        postponed.emplace_back(until, id, std::move(event));
+        auto event_id = ++last_event_id;
+        postponed.emplace_back(until, event_id, std::move(task));
         std::push_heap(postponed.begin(), postponed.end());
-        return id;
+        return event_id;
     }

     /// Cancel a postponed event using its unique id.
     /// NOTE: Only postponed events can be canceled.
     /// NOTE: If you need to cancel enqueued event, consider doing your actions inside another enqueued
     /// NOTE: event instead. This ensures that all previous events are processed.
-    bool cancelPostponed(UInt64 postponed_id)
+    bool cancelPostponed(EventId postponed_event_id)
     {
-        if (postponed_id == not_postponed)
+        if (postponed_event_id == not_postponed)
             return false;
         std::unique_lock lock{mutex};
         for (auto i = postponed.begin(), e = postponed.end(); i != e; ++i)
         {
-            if (i->id == postponed_id)
+            if (i->event_id == postponed_event_id)
             {
                 postponed.erase(i);
                 // It is O(n), but we do not expect either big heaps or frequent cancels. So it is fine.
                 std::make_heap(postponed.begin(), postponed.end());
                 return true;
             }
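
Aside: the reversed comparison in Postponed::operator< is what turns std::push_heap/std::pop_heap (which maintain a max-heap) into a min-heap ordered by (key, event_id), so the front of the heap is always the earliest postponed event. A minimal standalone sketch of the same trick, with illustrative names that are not from the source:

#include <algorithm>
#include <iostream>
#include <tuple>
#include <vector>

struct Item
{
    int key; // e.g. a time point
    int id;  // tie-breaker, preserves submission order

    bool operator<(const Item & rhs) const
    {
        // Reversed: the "largest" element for std::push_heap is the smallest (key, id),
        // so heap.front() is always the earliest item.
        return std::tie(key, id) > std::tie(rhs.key, rhs.id);
    }
};

int main()
{
    std::vector<Item> heap;
    for (Item i : {Item{200, 1}, Item{150, 2}, Item{175, 3}})
    {
        heap.push_back(i);
        std::push_heap(heap.begin(), heap.end());
    }
    while (!heap.empty())
    {
        std::cout << heap.front().key << '\n'; // prints 150, 175, 200
        std::pop_heap(heap.begin(), heap.end());
        heap.pop_back();
    }
}
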
@ -148,11 +272,23 @@ public:
         }

     /// Add an `event` for immediate processing
-    void enqueue(Event && event)
+    void enqueue(Task && task)
     {
         std::unique_lock lock{mutex};
-        bool was_empty = queue.empty();
-        queue.emplace_back(event);
+        bool was_empty = events.empty() && activations.empty();
+        auto event_id = ++last_event_id;
+        events.emplace_back(event_id, std::move(task));
+        if (was_empty)
+            pending.notify_one();
+    }
+
+    /// Add an activation `event` for immediate processing. Activations use a separate queue for performance reasons.
+    void enqueueActivation(ISchedulerNode * node)
+    {
+        std::unique_lock lock{mutex};
+        bool was_empty = events.empty() && activations.empty();
+        node->activation_event_id = ++last_event_id;
+        activations.push_back(*node);
         if (was_empty)
             pending.notify_one();
     }
@ -163,7 +299,7 @@ public:
     bool forceProcess()
     {
         std::unique_lock lock{mutex};
-        if (!queue.empty())
+        if (!events.empty() || !activations.empty())
         {
             processQueue(std::move(lock));
             return true;
@ -181,7 +317,7 @@ public:
     bool tryProcess()
     {
         std::unique_lock lock{mutex};
-        if (!queue.empty())
+        if (!events.empty() || !activations.empty())
         {
             processQueue(std::move(lock));
             return true;
@ -205,7 +341,7 @@ public:
         std::unique_lock lock{mutex};
         while (true)
         {
             if (!queue.empty())
-            if (!queue.empty())
+            if (!events.empty() || !activations.empty())
             {
                 processQueue(std::move(lock));
                 return;
@ -269,141 +405,69 @@ private:

     void processQueue(std::unique_lock<std::mutex> && lock)
     {
-        Event event = std::move(queue.front());
-        queue.pop_front();
+        if (events.empty())
+        {
+            processActivation(std::move(lock));
+            return;
+        }
+        if (activations.empty())
+        {
+            processEvent(std::move(lock));
+            return;
+        }
+        if (activations.front().activation_event_id < events.front().event_id)
+            processActivation(std::move(lock));
+        else
+            processEvent(std::move(lock));
+    }
+
+    void processActivation(std::unique_lock<std::mutex> && lock)
+    {
+        ISchedulerNode * node = &activations.front();
+        activations.pop_front();
+        node->activation_event_id = 0;
         lock.unlock(); // do not hold queue mutex while processing events
-        event();
+        node->parent->activateChild(node);
+    }
+
+    void processEvent(std::unique_lock<std::mutex> && lock)
+    {
+        Task task = std::move(events.front().task);
+        events.pop_front();
+        lock.unlock(); // do not hold queue mutex while processing events
+        task();
     }

     void processPostponed(std::unique_lock<std::mutex> && lock)
     {
-        Event event = std::move(*postponed.front().event);
+        Task task = std::move(*postponed.front().task);
         std::pop_heap(postponed.begin(), postponed.end());
         postponed.pop_back();
         lock.unlock(); // do not hold queue mutex while processing events
-        event();
+        task();
     }

     std::mutex mutex;
     std::condition_variable pending;
-    std::deque<Event> queue;
+
+    // `events` and `activations` logically represent one ordered queue. To preserve the common order we use `EventId`
+    // Activations are stored in a separate queue for performance reasons (mostly to avoid any allocations)
+    std::deque<Event> events;
+    boost::intrusive::list<ISchedulerNode> activations;

     std::vector<Postponed> postponed;
-    UInt64 last_id = 0;
+    EventId last_event_id = 0;

     std::atomic<TimePoint> manual_time{TimePoint()}; // for tests only
 };
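
The comment above states the invariant: `events` and `activations` behave as a single FIFO ordered by EventId even though they live in two containers, and processQueue pops whichever front carries the smaller id. A minimal standalone sketch of that two-queue merge (illustrative types, not the source's):

#include <cstdint>
#include <deque>
#include <iostream>
#include <string>
#include <utility>

struct MergedQueue
{
    std::deque<std::pair<uint64_t, std::string>> events;      // (id, payload)
    std::deque<std::pair<uint64_t, std::string>> activations; // (id, node name)
    uint64_t last_id = 0;

    void pushEvent(std::string s) { events.emplace_back(++last_id, std::move(s)); }
    void pushActivation(std::string s) { activations.emplace_back(++last_id, std::move(s)); }

    // Pop whichever front has the smaller id: ids increase monotonically across
    // both queues, so global submission order is preserved.
    std::string pop()
    {
        bool take_activation = !activations.empty()
            && (events.empty() || activations.front().first < events.front().first);
        auto & q = take_activation ? activations : events;
        std::string out = std::move(q.front().second);
        q.pop_front();
        return out;
    }
};

int main()
{
    MergedQueue m;
    m.pushEvent("E1");
    m.pushActivation("A1");
    m.pushEvent("E2");
    while (!(m.events.empty() && m.activations.empty()))
        std::cout << m.pop() << ' '; // E1 A1 E2
}
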

-/*
- * Node of hierarchy for scheduling requests for resource. Base class for all
- * kinds of scheduling elements (queues, policies, constraints and schedulers).
- *
- * Root node is a scheduler, which has it's thread to dequeue requests,
- * execute requests (see ResourceRequest) and process events in a thread-safe manner.
- * Immediate children of the scheduler represent independent resources.
- * Each resource has it's own hierarchy to achieve required scheduling policies.
- * Non-leaf nodes do not hold requests, but keep scheduling state
- * (e.g. consumption history, amount of in-flight requests, etc).
- * Leafs of hierarchy are queues capable of holding pending requests.
- *
- *        scheduler         (SchedulerRoot)
- *         /     \
- *  constraint  constraint  (SemaphoreConstraint)
- *      |          |
- *   policy      policy     (PriorityPolicy)
- *   /    \      /    \
- *  q1    q2    q3    q4    (FifoQueue)
- *
- * Dequeueing request from an inner node will dequeue request from one of active leaf-queues in its subtree.
- * Node is considered to be active iff:
- *  - it has at least one pending request in one of leaves of it's subtree;
- *  - and enforced constraints, if any, are satisfied
- *    (e.g. amount of concurrent requests is not greater than some number).
- *
- * All methods must be called only from scheduler thread for thread-safety.
- */
-class ISchedulerNode : private boost::noncopyable
+inline void ISchedulerNode::scheduleActivation()
 {
-public:
-    explicit ISchedulerNode(EventQueue * event_queue_, const Poco::Util::AbstractConfiguration & config = emptyConfig(), const String & config_prefix = {})
-        : event_queue(event_queue_)
-        , info(config, config_prefix)
-    {}
-
-    virtual ~ISchedulerNode() = default;
-
-    /// Checks if two nodes configuration is equal
-    virtual bool equals(ISchedulerNode * other)
+    if (likely(parent))
     {
-        return info.equals(other->info);
+        // The same as `enqueue([this] { parent->activateChild(this); });` but faster
+        event_queue->enqueueActivation(this);
     }
-
-    /// Attach new child
-    virtual void attachChild(const std::shared_ptr<ISchedulerNode> & child) = 0;
-
-    /// Detach and destroy child
-    virtual void removeChild(ISchedulerNode * child) = 0;
-
-    /// Get attached child by name
-    virtual ISchedulerNode * getChild(const String & child_name) = 0;
-
-    /// Activation of child due to the first pending request
-    /// Should be called on leaf node (i.e. queue) to propagate activation signal through chain to the root
-    virtual void activateChild(ISchedulerNode * child) = 0;
-
-    /// Returns true iff node is active
-    virtual bool isActive() = 0;
-
-    /// Returns number of active children
-    virtual size_t activeChildren() = 0;
-
-    /// Returns the first request to be executed as the first component of resulting pair.
-    /// The second pair component is `true` iff node is still active after dequeueing.
-    virtual std::pair<ResourceRequest *, bool> dequeueRequest() = 0;
-
-    /// Returns full path string using names of every parent
-    String getPath()
-    {
-        String result;
-        ISchedulerNode * ptr = this;
-        while (ptr->parent)
-        {
-            result = "/" + ptr->basename + result;
-            ptr = ptr->parent;
-        }
-        return result.empty() ? "/" : result;
-    }
-
-    /// Attach to a parent (used by attachChild)
-    virtual void setParent(ISchedulerNode * parent_)
-    {
-        parent = parent_;
-    }
-
-protected:
-    /// Notify parents about the first pending request or constraint becoming satisfied.
-    /// Postponed to be handled in scheduler thread, so it is intended to be called from outside.
-    void scheduleActivation()
-    {
-        if (likely(parent))
-        {
-            event_queue->enqueue([this] { parent->activateChild(this); });
-        }
-    }
-
-public:
-    EventQueue * const event_queue;
-    String basename;
-    SchedulerNodeInfo info;
-    ISchedulerNode * parent = nullptr;
-
-    /// Introspection
-    std::atomic<UInt64> dequeued_requests{0};
-    std::atomic<UInt64> canceled_requests{0};
-    std::atomic<ResourceCost> dequeued_cost{0};
-    std::atomic<ResourceCost> canceled_cost{0};
-    std::atomic<UInt64> busy_periods{0};
-};
-
-using SchedulerNodePtr = std::shared_ptr<ISchedulerNode>;
+}

 }

143 src/Common/Scheduler/Nodes/tests/gtest_event_queue.cpp Normal file
@ -0,0 +1,143 @@
+#include <chrono>
+#include <gtest/gtest.h>
+
+#include <Common/Scheduler/ISchedulerNode.h>
+
+using namespace DB;
+
+class FakeSchedulerNode : public ISchedulerNode
+{
+public:
+    explicit FakeSchedulerNode(String & log_, EventQueue * event_queue_, const Poco::Util::AbstractConfiguration & config = emptyConfig(), const String & config_prefix = {})
+        : ISchedulerNode(event_queue_, config, config_prefix)
+        , log(log_)
+    {}
+
+    void attachChild(const SchedulerNodePtr & child) override
+    {
+        log += " +" + child->basename;
+    }
+
+    void removeChild(ISchedulerNode * child) override
+    {
+        log += " -" + child->basename;
+    }
+
+    ISchedulerNode * getChild(const String & /* child_name */) override
+    {
+        return nullptr;
+    }
+
+    void activateChild(ISchedulerNode * child) override
+    {
+        log += " A" + child->basename;
+    }
+
+    bool isActive() override
+    {
+        return false;
+    }
+
+    size_t activeChildren() override
+    {
+        return 0;
+    }
+
+    std::pair<ResourceRequest *, bool> dequeueRequest() override
+    {
+        log += " D";
+        return {nullptr, false};
+    }
+
+private:
+    String & log;
+};
+
+struct QueueTest {
+    String log;
+    EventQueue event_queue;
+    FakeSchedulerNode root_node;
+
+    QueueTest()
+        : root_node(log, &event_queue)
+    {}
+
+    SchedulerNodePtr makeNode(const String & name)
+    {
+        auto node = std::make_shared<FakeSchedulerNode>(log, &event_queue);
+        node->basename = name;
+        node->setParent(&root_node);
+        return std::static_pointer_cast<ISchedulerNode>(node);
+    }
+
+    void process(EventQueue::TimePoint now, const String & expected_log, size_t limit = size_t(-1))
+    {
+        event_queue.setManualTime(now);
+        for (;limit > 0; limit--)
+        {
+            if (!event_queue.tryProcess())
+                break;
+        }
+        EXPECT_EQ(log, expected_log);
+        log.clear();
+    }
+
+    void activate(const SchedulerNodePtr & node)
+    {
+        event_queue.enqueueActivation(node.get());
+    }
+
+    void event(const String & text)
+    {
+        event_queue.enqueue([this, text] { log += " " + text; });
+    }
+
+    EventId postpone(EventQueue::TimePoint until, const String & text)
+    {
+        return event_queue.postpone(until, [this, text] { log += " " + text; });
+    }
+
+    void cancel(EventId event_id)
+    {
+        event_queue.cancelPostponed(event_id);
+    }
+};
+
+TEST(SchedulerEventQueue, Smoke)
+{
+    QueueTest t;
+
+    using namespace std::chrono_literals;
+
+    EventQueue::TimePoint start = std::chrono::system_clock::now();
+    t.process(start, "", 0);
+
+    // Activations
+    auto node1 = t.makeNode("1");
+    auto node2 = t.makeNode("2");
+    t.activate(node2);
+    t.activate(node1);
+    t.process(start + 42s, " A2 A1");
+
+    // Events
+    t.event("E1");
+    t.event("E2");
+    t.process(start + 100s, " E1 E2");
+
+    // Postponed events
+    t.postpone(start + 200s, "P200");
+    auto p190 = t.postpone(start + 200s, "P190");
+    t.postpone(start + 150s, "P150");
+    t.postpone(start + 175s, "P175");
+    t.process(start + 180s, " P150 P175");
+    t.event("E3");
+    t.cancel(p190);
+    t.process(start + 300s, " E3 P200");
+
+    // Ordering of events and activations
+    t.event("E1");
+    t.activate(node1);
+    t.event("E2");
+    t.activate(node2);
+    t.process(start + 300s, " E1 A1 E2 A2");
+}
@ -5,8 +5,6 @@

 #include <Common/Scheduler/Nodes/FairPolicy.h>
 #include <Common/Scheduler/Nodes/ThrottlerConstraint.h>
-#include "Common/Scheduler/ISchedulerNode.h"
-#include "Common/Scheduler/ResourceRequest.h"

 using namespace DB;

@ -609,7 +609,10 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)
                 uncommitted_auth.pop_front();
                 if (uncommitted_auth.empty())
                     session_and_auth.erase(add_auth->session_id);
+            }
+            else if (auto * close_session = std::get_if<CloseSessionDelta>(&front_delta.operation))
+            {
+                closed_sessions.erase(close_session->session_id);
             }

         deltas.pop_front();
@ -682,6 +685,10 @@ void KeeperStorage::UncommittedState::rollback(int64_t rollback_zxid)
                     session_and_auth.erase(add_auth->session_id);
                 }
             }
+            else if (auto * close_session = std::get_if<CloseSessionDelta>(&delta_it->operation))
+            {
+                closed_sessions.erase(close_session->session_id);
+            }
         }

         if (delta_it == deltas.rend())
@ -878,6 +885,10 @@ Coordination::Error KeeperStorage::commit(int64_t commit_zxid)
                 session_and_auth[operation.session_id].emplace_back(std::move(operation.auth_id));
                 return Coordination::Error::ZOK;
             }
+            else if constexpr (std::same_as<DeltaType, KeeperStorage::CloseSessionDelta>)
+            {
+                return Coordination::Error::ZOK;
+            }
             else
             {
                 // shouldn't be called in any process functions
@ -2366,12 +2377,15 @@ void KeeperStorage::preprocessRequest(

             ephemerals.erase(session_ephemerals);
         }
+        new_deltas.emplace_back(transaction.zxid, CloseSessionDelta{session_id});
+        uncommitted_state.closed_sessions.insert(session_id);

         new_digest = calculateNodesDigest(new_digest, new_deltas);
         return;
     }

-    if (check_acl && !request_processor->checkAuth(*this, session_id, false))
+    if ((check_acl && !request_processor->checkAuth(*this, session_id, false)) ||
+        uncommitted_state.closed_sessions.contains(session_id)) // Is session closed but not committed yet
     {
         uncommitted_state.deltas.emplace_back(new_last_zxid, Coordination::Error::ZNOAUTH);
         return;
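
The preprocess change above rejects new requests for a session whose close is pre-committed but not yet committed, and the commit/rollback hunks remove the session from that set again. A minimal standalone sketch of the same guard pattern (hypothetical type, not Keeper's API):

#include <cstdint>
#include <iostream>
#include <unordered_set>

struct UncommittedSessions
{
    std::unordered_set<int64_t> closed_sessions;

    void precommitClose(int64_t session) { closed_sessions.insert(session); }
    void commitOrRollbackClose(int64_t session) { closed_sessions.erase(session); }

    // A request is rejected while the close is pending, like the
    // closed_sessions.contains(session_id) check in preprocessRequest above.
    bool acceptRequest(int64_t session) const { return !closed_sessions.contains(session); }
};

int main()
{
    UncommittedSessions s;
    s.precommitClose(1);
    std::cout << s.acceptRequest(1) << '\n'; // 0: rejected while close is uncommitted
    s.commitOrRollbackClose(1);
    std::cout << s.acceptRequest(1) << '\n'; // 1: usable again after rollback
}
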
@ -314,8 +314,13 @@ public:
         AuthID auth_id;
     };

+    struct CloseSessionDelta
+    {
+        int64_t session_id;
+    };
+
     using Operation = std::
-        variant<CreateNodeDelta, RemoveNodeDelta, UpdateNodeDelta, SetACLDelta, AddAuthDelta, ErrorDelta, SubDeltaEnd, FailedMultiDelta>;
+        variant<CreateNodeDelta, RemoveNodeDelta, UpdateNodeDelta, SetACLDelta, AddAuthDelta, ErrorDelta, SubDeltaEnd, FailedMultiDelta, CloseSessionDelta>;

     struct Delta
     {
@ -351,6 +356,7 @@ public:
     std::shared_ptr<Node> tryGetNodeFromStorage(StringRef path) const;

     std::unordered_map<int64_t, std::list<const AuthID *>> session_and_auth;
+    std::unordered_set<int64_t> closed_sessions;

     struct UncommittedNode
     {
@ -2019,6 +2019,67 @@ TEST_P(CoordinationTest, TestCreateNodeWithAuthSchemeForAclWhenAuthIsPrecommitte
     EXPECT_EQ(acls[0].permissions, 31);
 }

+TEST_P(CoordinationTest, TestPreprocessWhenCloseSessionIsPrecommitted)
+{
+    using namespace Coordination;
+    using namespace DB;
+
+    ChangelogDirTest snapshots("./snapshots");
+    setSnapshotDirectory("./snapshots");
+    ResponsesQueue queue(std::numeric_limits<size_t>::max());
+    SnapshotsQueue snapshots_queue{1};
+    int64_t session_id = 1;
+    size_t term = 0;
+
+    auto state_machine = std::make_shared<KeeperStateMachine>(queue, snapshots_queue, keeper_context, nullptr);
+    state_machine->init();
+
+    auto & storage = state_machine->getStorageUnsafe();
+    const auto & uncommitted_state = storage.uncommitted_state;
+
+    // Create first node for the session
+    String node_path_1 = "/node_1";
+    std::shared_ptr<ZooKeeperCreateRequest> create_req_1 = std::make_shared<ZooKeeperCreateRequest>();
+    create_req_1->path = node_path_1;
+    auto create_entry_1 = getLogEntryFromZKRequest(term, session_id, state_machine->getNextZxid(), create_req_1);
+
+    state_machine->pre_commit(1, create_entry_1->get_buf());
+    EXPECT_TRUE(uncommitted_state.nodes.contains(node_path_1));
+
+    state_machine->commit(1, create_entry_1->get_buf());
+    EXPECT_TRUE(storage.container.contains(node_path_1));
+
+    // Close session
+    std::shared_ptr<ZooKeeperCloseRequest> close_req = std::make_shared<ZooKeeperCloseRequest>();
+    auto close_entry = getLogEntryFromZKRequest(term, session_id, state_machine->getNextZxid(), close_req);
+    // Pre-commit close session
+    state_machine->pre_commit(2, close_entry->get_buf());
+
+    // Try to create second node after close session is pre-committed
+    String node_path_2 = "/node_2";
+    std::shared_ptr<ZooKeeperCreateRequest> create_req_2 = std::make_shared<ZooKeeperCreateRequest>();
+    create_req_2->path = node_path_2;
+    auto create_entry_2 = getLogEntryFromZKRequest(term, session_id, state_machine->getNextZxid(), create_req_2);
+
+    // Pre-commit creating second node
+    state_machine->pre_commit(3, create_entry_2->get_buf());
+    // Second node wasn't created
+    EXPECT_FALSE(uncommitted_state.nodes.contains(node_path_2));
+
+    // Rollback pre-committed closing session
+    state_machine->rollback(3, create_entry_2->get_buf());
+    state_machine->rollback(2, close_entry->get_buf());
+
+    // Pre-commit creating second node
+    state_machine->pre_commit(2, create_entry_2->get_buf());
+    // Now second node was created
+    EXPECT_TRUE(uncommitted_state.nodes.contains(node_path_2));
+
+    state_machine->commit(2, create_entry_2->get_buf());
+    EXPECT_TRUE(storage.container.contains(node_path_1));
+    EXPECT_TRUE(storage.container.contains(node_path_2));
+}
+
 TEST_P(CoordinationTest, TestSetACLWithAuthSchemeForAclWhenAuthIsPrecommitted)
 {
     using namespace Coordination;
@ -102,6 +102,8 @@ namespace DB
     M(UInt64, max_dictionary_num_to_warn, 1000lu, "If the number of dictionaries is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
     M(UInt64, max_database_num_to_warn, 1000lu, "If the number of databases is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
     M(UInt64, max_part_num_to_warn, 100000lu, "If the number of parts is greater than this value, the server will create a warning that will be displayed to the user.", 0) \
+    M(UInt64, max_table_num_to_throw, 0lu, "If number of tables is greater than this value, server will throw an exception. 0 means no limitation. View, remote tables, dictionary, system tables are not counted. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.", 0) \
+    M(UInt64, max_database_num_to_throw, 0lu, "If number of databases is greater than this value, server will throw an exception. 0 means no limitation.", 0) \
     M(UInt64, concurrent_threads_soft_limit_num, 0, "Sets how many concurrent thread can be allocated before applying CPU pressure. Zero means unlimited.", 0) \
     M(UInt64, concurrent_threads_soft_limit_ratio_to_cores, 0, "Same as concurrent_threads_soft_limit_num, but with ratio to cores.", 0) \
     \
@ -146,6 +148,8 @@ namespace DB
     M(UInt64, global_profiler_real_time_period_ns, 0, "Period for real clock timer of global profiler (in nanoseconds). Set 0 value to turn off the real clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
     M(UInt64, global_profiler_cpu_time_period_ns, 0, "Period for CPU clock timer of global profiler (in nanoseconds). Set 0 value to turn off the CPU clock global profiler. Recommended value is at least 10000000 (100 times a second) for single queries or 1000000000 (once a second) for cluster-wide profiling.", 0) \
     M(Bool, enable_azure_sdk_logging, false, "Enables logging from Azure sdk", 0) \
+    M(String, merge_workload, "default", "Name of workload to be used to access resources for all merges (may be overridden by a merge tree setting)", 0) \
+    M(String, mutation_workload, "default", "Name of workload to be used to access resources for all mutations (may be overridden by a merge tree setting)", 0) \
     M(Double, gwp_asan_force_sample_probability, 0, "Probability that an allocation from specific places will be sampled by GWP Asan (i.e. PODArray allocations)", 0) \

 /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp
@ -933,6 +933,7 @@ class IColumn;
     M(Int64, prefer_warmed_unmerged_parts_seconds, 0, "Only available in ClickHouse Cloud. If a merged part is less than this many seconds old and is not pre-warmed (see cache_populated_by_fetch), but all its source parts are available and pre-warmed, SELECT queries will read from those parts instead. Only for ReplicatedMergeTree. Note that this only checks whether CacheWarmer processed the part; if the part was fetched into cache by something else, it'll still be considered cold until CacheWarmer gets to it; if it was warmed, then evicted from cache, it'll still be considered warm.", 0) \
     M(Bool, iceberg_engine_ignore_schema_evolution, false, "Ignore schema evolution in Iceberg table engine and read all data using latest schema saved on table creation. Note that it can lead to incorrect result", 0) \
     M(Bool, allow_deprecated_error_prone_window_functions, false, "Allow usage of deprecated error prone window functions (neighbor, runningAccumulate, runningDifferenceStartingWithFirstValue, runningDifference)", 0) \
+    M(Bool, uniform_snowflake_conversion_functions, true, "Enables functions snowflakeIDToDateTime[64] and dateTime[64]ToSnowflakeID while disabling functions snowflakeToDateTime[64] and dateTime[64]ToSnowflake.", 0) \

 // End of COMMON_SETTINGS
 // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS.
|
|||||||
{"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."},
|
{"input_format_parquet_max_block_size", 8192, DEFAULT_BLOCK_SIZE, "Increase block size for parquet reader."},
|
||||||
{"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."},
|
{"input_format_parquet_prefer_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Average block bytes output by parquet reader."},
|
||||||
{"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"},
|
{"enable_blob_storage_log", true, true, "Write information about blob storage operations to system.blob_storage_log table"},
|
||||||
|
{"uniform_snowflake_conversion_functions", false, true, "Enable functions snowflakeIDToDateTime[64] and dateTime[64]ToSnowflakeID."},
|
||||||
{"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."},
|
{"allow_statistic_optimize", false, false, "Old setting which popped up here being renamed."},
|
||||||
{"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."},
|
{"allow_experimental_statistic", false, false, "Old setting which popped up here being renamed."},
|
||||||
{"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."},
|
{"allow_statistics_optimize", false, false, "The setting was renamed. The previous name is `allow_statistic_optimize`."},
|
||||||
|
@ -186,6 +186,7 @@ void DatabaseLazy::attachTable(ContextPtr /* context_ */, const String & table_n
         throw Exception(ErrorCodes::TABLE_ALREADY_EXISTS, "Table {}.{} already exists.", backQuote(database_name), backQuote(table_name));

     it->second.expiration_iterator = cache_expiration_queue.emplace(cache_expiration_queue.end(), current_time, table_name);
+
     CurrentMetrics::add(CurrentMetrics::AttachedTable, 1);
 }

@ -202,6 +203,7 @@ StoragePtr DatabaseLazy::detachTable(ContextPtr /* context */, const String & ta
         if (it->second.expiration_iterator != cache_expiration_queue.end())
             cache_expiration_queue.erase(it->second.expiration_iterator);
         tables_cache.erase(it);
+
         CurrentMetrics::sub(CurrentMetrics::AttachedTable, 1);
     }
     return res;
@ -73,9 +73,10 @@ zkutil::ZooKeeperPtr DatabaseReplicated::getZooKeeper() const
     return getContext()->getZooKeeper();
 }

-static inline String getHostID(ContextPtr global_context, const UUID & db_uuid)
+static inline String getHostID(ContextPtr global_context, const UUID & db_uuid, bool secure)
 {
-    return Cluster::Address::toString(getFQDNOrHostName(), global_context->getTCPPort()) + ':' + toString(db_uuid);
+    UInt16 port = secure ? global_context->getTCPPortSecure().value_or(DBMS_DEFAULT_SECURE_PORT) : global_context->getTCPPort();
+    return Cluster::Address::toString(getFQDNOrHostName(), port) + ':' + toString(db_uuid);
 }

 static inline UInt64 getMetadataHash(const String & table_name, const String & metadata)
@ -415,8 +416,10 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
             return;
         }

-        String host_id = getHostID(getContext(), db_uuid);
-        if (is_create_query || replica_host_id != host_id)
+        String host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);
+        String host_id_default = getHostID(getContext(), db_uuid, false);
+
+        if (is_create_query || (replica_host_id != host_id && replica_host_id != host_id_default))
         {
             throw Exception(
                 ErrorCodes::REPLICA_ALREADY_EXISTS,
@ -424,6 +427,14 @@ void DatabaseReplicated::tryConnectToZooKeeperAndInitDatabase(LoadingStrictnessL
                 replica_name, shard_name, zookeeper_path, replica_host_id, host_id);
         }

+        /// Before 24.6 we always created host_id with insecure port, even if cluster_auth_info.cluster_secure_connection was true.
+        /// So not to break compatibility, we need to update host_id to secure one if cluster_auth_info.cluster_secure_connection is true.
+        if (host_id != host_id_default && replica_host_id == host_id_default)
+        {
+            current_zookeeper->set(replica_path, host_id, -1);
+            createEmptyLogEntry(current_zookeeper);
+        }
+
         /// Check that replica_group_name in ZooKeeper matches the local one and change it if necessary.
         String zk_replica_group_name;
         if (!current_zookeeper->tryGet(replica_path + "/replica_group", zk_replica_group_name))
@ -550,7 +561,7 @@ void DatabaseReplicated::createReplicaNodesInZooKeeper(const zkutil::ZooKeeperPt
             "already contains some data and it does not look like Replicated database path.", zookeeper_path);

     /// Write host name to replica_path, it will protect from multiple replicas with the same name
-    auto host_id = getHostID(getContext(), db_uuid);
+    auto host_id = getHostID(getContext(), db_uuid, cluster_auth_info.cluster_secure_connection);

     for (int attempts = 10; attempts > 0; --attempts)
     {
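
With the extra `secure` flag, the same replica can be registered under two host_id strings, one built from the secure port and one from the insecure port, and the compatibility branch above accepts either before upgrading the stored value. A standalone sketch of the string shape (hypothetical helper and hard-coded ports, not the source function; 9000 and 9440 are only the conventional ClickHouse defaults):

#include <cstdint>
#include <iostream>
#include <string>

// Mirrors the shape of getHostID: "fqdn:port:db_uuid", port chosen by the secure flag.
std::string makeHostID(const std::string & fqdn, bool secure, const std::string & db_uuid)
{
    uint16_t port = secure ? 9440 : 9000;
    return fqdn + ':' + std::to_string(port) + ':' + db_uuid;
}

int main()
{
    std::string uuid = "a1b2";
    std::string secure_id = makeHostID("replica1", true, uuid);   // replica1:9440:a1b2
    std::string default_id = makeHostID("replica1", false, uuid); // replica1:9000:a1b2
    // A pre-24.6 replica stored default_id even over secure connections,
    // hence the check: accept either form, then rewrite the stored value.
    std::cout << secure_id << '\n' << default_id << '\n';
}
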
@ -260,7 +260,9 @@ StoragePtr DatabaseWithOwnTablesBase::detachTableUnlocked(const String & table_n
     res = it->second;
     tables.erase(it);
     res->is_detached = true;
-    CurrentMetrics::sub(getAttachedCounterForStorage(res), 1);
+
+    if (res->isSystemStorage() == false)
+        CurrentMetrics::sub(getAttachedCounterForStorage(res), 1);

     auto table_id = res->getStorageID();
     if (table_id.hasUUID())
@ -301,7 +303,9 @@ void DatabaseWithOwnTablesBase::attachTableUnlocked(const String & table_name, c
     /// It is important to reset is_detached here since in case of RENAME in
     /// non-Atomic database the is_detached is set to true before RENAME.
     table->is_detached = false;
-    CurrentMetrics::add(getAttachedCounterForStorage(table), 1);
+
+    if (table->isSystemStorage() == false && table_id.database_name != DatabaseCatalog::SYSTEM_DATABASE)
+        CurrentMetrics::add(getAttachedCounterForStorage(table), 1);
 }

 void DatabaseWithOwnTablesBase::shutdown()
@ -113,6 +113,36 @@ struct ByteHammingDistanceImpl
     }
 };

+void parseUTF8String(const char * __restrict data, size_t size, std::function<void(UInt32)> utf8_consumer, std::function<void(unsigned char)> ascii_consumer = nullptr)
+{
+    const char * end = data + size;
+    while (data < end)
+    {
+        size_t len = UTF8::seqLength(*data);
+        if (len == 1)
+        {
+            if (ascii_consumer)
+                ascii_consumer(static_cast<unsigned char>(*data));
+            else
+                utf8_consumer(static_cast<UInt32>(*data));
+            ++data;
+        }
+        else
+        {
+            auto code_point = UTF8::convertUTF8ToCodePoint(data, end - data);
+            if (code_point.has_value())
+            {
+                utf8_consumer(code_point.value());
+                data += len;
+            }
+            else
+            {
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal UTF-8 sequence, while processing '{}'", StringRef(data, end - data));
+            }
+        }
+    }
+}
+
 template <bool is_utf8>
 struct ByteJaccardIndexImpl
 {
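
parseUTF8String drives two callbacks: one per decoded code point, plus an optional fast path for single-byte (ASCII) characters. A standalone sketch of the same consumer-callback shape over a deliberately trivial decoder (illustrative only, not the DB::UTF8 API, and without the validation the source performs):

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Small UTF-8 walker with the same callback shape as parseUTF8String.
// No validation: a real decoder must reject illegal sequences, as the source does.
void forEachCodePoint(const std::string & s,
                      std::function<void(uint32_t)> utf8_consumer,
                      std::function<void(unsigned char)> ascii_consumer = nullptr)
{
    for (size_t i = 0; i < s.size();)
    {
        unsigned char c = s[i];
        if (c < 0x80)
        {
            if (ascii_consumer) ascii_consumer(c); else utf8_consumer(c);
            ++i;
            continue;
        }
        size_t len = (c >= 0xF0) ? 4 : (c >= 0xE0) ? 3 : 2;
        uint32_t cp = c & (0xFF >> (len + 1)); // keep the payload bits of the lead byte
        for (size_t k = 1; k < len && i + k < s.size(); ++k)
            cp = (cp << 6) | (s[i + k] & 0x3F); // append 6 bits per continuation byte
        utf8_consumer(cp);
        i += len;
    }
}

int main()
{
    size_t points = 0;
    forEachCodePoint("Привет", [&](uint32_t) { ++points; });
    std::cout << points << '\n'; // 6 code points in 12 bytes
}
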
@ -138,57 +168,28 @@ struct ByteJaccardIndexImpl
         haystack_set.fill(0);
         needle_set.fill(0);

-        while (haystack < haystack_end)
+        if constexpr (is_utf8)
         {
-            size_t len = 1;
-            if constexpr (is_utf8)
-                len = UTF8::seqLength(*haystack);
-
-            if (len == 1)
+            parseUTF8String(
+                haystack,
+                haystack_size,
+                [&](UInt32 data) { haystack_utf8_set.insert(data); },
+                [&](unsigned char data) { haystack_set[data] = 1; });
+            parseUTF8String(
+                needle, needle_size, [&](UInt32 data) { needle_utf8_set.insert(data); }, [&](unsigned char data) { needle_set[data] = 1; });
+        }
+        else
+        {
+            while (haystack < haystack_end)
             {
                 haystack_set[static_cast<unsigned char>(*haystack)] = 1;
                 ++haystack;
             }
-            else
-            {
-                auto code_point = UTF8::convertUTF8ToCodePoint(haystack, haystack_end - haystack);
-                if (code_point.has_value())
-                {
-                    haystack_utf8_set.insert(code_point.value());
-                    haystack += len;
-                }
-                else
-                {
-                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal UTF-8 sequence, while processing '{}'", StringRef(haystack, haystack_end - haystack));
-                }
-            }
-        }
-
-        while (needle < needle_end)
-        {
-            size_t len = 1;
-            if constexpr (is_utf8)
-                len = UTF8::seqLength(*needle);
-
-            if (len == 1)
+            while (needle < needle_end)
             {
                 needle_set[static_cast<unsigned char>(*needle)] = 1;
                 ++needle;
             }
-            else
-            {
-                auto code_point = UTF8::convertUTF8ToCodePoint(needle, needle_end - needle);
-                if (code_point.has_value())
-                {
-                    needle_utf8_set.insert(code_point.value());
-                    needle += len;
-                }
-                else
-                {
-                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal UTF-8 sequence, while processing '{}'", StringRef(needle, needle_end - needle));
-                }
-            }
         }

         UInt8 intersection = 0;
@ -226,6 +227,7 @@ struct ByteJaccardIndexImpl

 static constexpr size_t max_string_size = 1u << 16;

+template<bool is_utf8>
 struct ByteEditDistanceImpl
 {
     using ResultType = UInt64;
@ -242,6 +244,16 @@ struct ByteEditDistanceImpl
                 ErrorCodes::TOO_LARGE_STRING_SIZE,
                 "The string size is too big for function editDistance, should be at most {}", max_string_size);

+        PaddedPODArray<UInt32> haystack_utf8;
+        PaddedPODArray<UInt32> needle_utf8;
+        if constexpr (is_utf8)
+        {
+            parseUTF8String(haystack, haystack_size, [&](UInt32 data) { haystack_utf8.push_back(data); });
+            parseUTF8String(needle, needle_size, [&](UInt32 data) { needle_utf8.push_back(data); });
+            haystack_size = haystack_utf8.size();
+            needle_size = needle_utf8.size();
+        }
+
         PaddedPODArray<ResultType> distances0(haystack_size + 1, 0);
         PaddedPODArray<ResultType> distances1(haystack_size + 1, 0);
@ -261,9 +273,16 @@ struct ByteEditDistanceImpl
                 insertion = distances1[pos_haystack] + 1;
                 substitution = distances0[pos_haystack];

-                if (*(needle + pos_needle) != *(haystack + pos_haystack))
-                    substitution += 1;
+                if constexpr (is_utf8)
+                {
+                    if (needle_utf8[pos_needle] != haystack_utf8[pos_haystack])
+                        substitution += 1;
+                }
+                else
+                {
+                    if (*(needle + pos_needle) != *(haystack + pos_haystack))
+                        substitution += 1;
+                }

                 distances1[pos_haystack + 1] = std::min(deletion, std::min(substitution, insertion));
             }
             distances0.swap(distances1);
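
Instantiated with is_utf8 = true, the DP above runs over decoded code points instead of raw bytes, which changes results for non-ASCII input: 'ё' versus 'е' is one substitution at the code-point level but two byte edits. A minimal standalone sketch of the same two-row Levenshtein recurrence over already-decoded code points (illustrative, not the source struct):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Two-row Levenshtein, same recurrence as ByteEditDistanceImpl, over code points.
uint64_t editDistance(const std::vector<uint32_t> & a, const std::vector<uint32_t> & b)
{
    std::vector<uint64_t> prev(a.size() + 1), cur(a.size() + 1);
    for (size_t i = 0; i <= a.size(); ++i)
        prev[i] = i; // distance from the empty prefix of b
    for (size_t j = 0; j < b.size(); ++j)
    {
        cur[0] = j + 1;
        for (size_t i = 0; i < a.size(); ++i)
        {
            uint64_t deletion = prev[i + 1] + 1;
            uint64_t insertion = cur[i] + 1;
            uint64_t substitution = prev[i] + (a[i] != b[j] ? 1 : 0);
            cur[i + 1] = std::min({deletion, insertion, substitution});
        }
        std::swap(prev, cur);
    }
    return prev[a.size()];
}

int main()
{
    // U+0451 'ё' vs U+0435 'е': one code-point substitution...
    std::cout << editDistance({0x0451}, {0x0435}) << '\n'; // 1
    // ...but their UTF-8 encodings are D1 91 vs D0 B5: two byte substitutions.
    std::cout << editDistance({0xD1, 0x91}, {0xD0, 0xB5}) << '\n'; // 2
}
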
@ -457,7 +476,12 @@ struct NameEditDistance
 {
     static constexpr auto name = "editDistance";
 };
-using FunctionEditDistance = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteEditDistanceImpl>, NameEditDistance>;
+using FunctionEditDistance = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteEditDistanceImpl<false>>, NameEditDistance>;
+struct NameEditDistanceUTF8
+{
+    static constexpr auto name = "editDistanceUTF8";
+};
+using FunctionEditDistanceUTF8 = FunctionsStringSimilarity<FunctionStringDistanceImpl<ByteEditDistanceImpl<true>>, NameEditDistanceUTF8>;

 struct NameDamerauLevenshteinDistance
 {
@ -499,6 +523,10 @@ REGISTER_FUNCTION(StringDistance)
         FunctionDocumentation{.description = R"(Calculates the edit distance between two byte-strings.)"});
     factory.registerAlias("levenshteinDistance", NameEditDistance::name);

+    factory.registerFunction<FunctionEditDistanceUTF8>(
+        FunctionDocumentation{.description = R"(Calculates the edit distance between two UTF8 strings.)"});
+    factory.registerAlias("levenshteinDistanceUTF8", NameEditDistanceUTF8::name);
+
     factory.registerFunction<FunctionDamerauLevenshteinDistance>(
         FunctionDocumentation{.description = R"(Calculates the Damerau-Levenshtein distance between two byte-strings.)"});
181 src/Functions/dateTimeToSnowflakeID.cpp Normal file
@ -0,0 +1,181 @@
+#include <Functions/FunctionFactory.h>
+#include <Functions/IFunction.h>
+#include <Functions/FunctionHelpers.h>
+#include <DataTypes/DataTypeDateTime64.h>
+#include <DataTypes/DataTypesDecimal.h>
+#include <DataTypes/DataTypesNumber.h>
+#include <Columns/ColumnConst.h>
+#include <Columns/ColumnsDateTime.h>
+#include <Columns/ColumnsNumber.h>
+#include <Core/DecimalFunctions.h>
+#include <Interpreters/Context.h>
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int UNKNOWN_FUNCTION;
+}
+
+namespace
+{
+
+/// See generateSnowflakeID.cpp
+constexpr int time_shift = 22;
+
+}
+
+class FunctionDateTimeToSnowflakeID : public IFunction
+{
+private:
+    const bool uniform_snowflake_conversion_functions;
+
+public:
+    static constexpr auto name = "dateTimeToSnowflakeID";
+
+    static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionDateTimeToSnowflakeID>(context); }
+    explicit FunctionDateTimeToSnowflakeID(ContextPtr context)
+        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
+    {}
+
+    String getName() const override { return name; }
+    size_t getNumberOfArguments() const override { return 0; }
+    bool isVariadic() const override { return true; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+
+    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
+    {
+        FunctionArgumentDescriptors args{
+            {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isDateTime), nullptr, "DateTime"}
+        };
+        FunctionArgumentDescriptors optional_args{
+            {"epoch", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeUInt), isColumnConst, "UInt*"}
+        };
+        validateFunctionArgumentTypes(*this, arguments, args, optional_args);
+
+        return std::make_shared<DataTypeUInt64>();
+    }
+
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
+    {
+        if (!uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "To use function {}, setting 'uniform_snowflake_conversion_functions' must be enabled", getName());
+
+        const auto & col_src = *arguments[0].column;
+
+        size_t epoch = 0;
+        if (arguments.size() == 2 && input_rows_count != 0)
+        {
+            const auto & col_epoch = *arguments[1].column;
+            epoch = col_epoch.getUInt(0);
+        }
+
+        auto col_res = ColumnUInt64::create(input_rows_count);
+        auto & res_data = col_res->getData();
+
+        const auto & src_data = typeid_cast<const ColumnDateTime &>(col_src).getData();
+        for (size_t i = 0; i < input_rows_count; ++i)
+            res_data[i] = (static_cast<UInt64>(src_data[i]) * 1000 - epoch) << time_shift;
+        return col_res;
+    }
+};
+
+
+class FunctionDateTime64ToSnowflakeID : public IFunction
+{
+private:
+    const bool uniform_snowflake_conversion_functions;
+
+public:
+    static constexpr auto name = "dateTime64ToSnowflakeID";
+
+    static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionDateTime64ToSnowflakeID>(context); }
+    explicit FunctionDateTime64ToSnowflakeID(ContextPtr context)
+        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
+    {}
+
+    String getName() const override { return name; }
+    size_t getNumberOfArguments() const override { return 0; }
+    bool isVariadic() const override { return true; }
+    bool useDefaultImplementationForConstants() const override { return true; }
+    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
+
+    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
+    {
+        FunctionArgumentDescriptors args{
+            {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isDateTime64), nullptr, "DateTime64"}
+        };
+        FunctionArgumentDescriptors optional_args{
+            {"epoch", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeUInt), isColumnConst, "UInt*"}
+        };
+        validateFunctionArgumentTypes(*this, arguments, args, optional_args);

+        return std::make_shared<DataTypeUInt64>();
+    }
+
+    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
+    {
+        if (!uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "To use function {}, setting 'uniform_snowflake_conversion_functions' must be enabled", getName());
+
+        const auto & col_src = *arguments[0].column;
+        const auto & src_data = typeid_cast<const ColumnDateTime64 &>(col_src).getData();
+
+        size_t epoch = 0;
+        if (arguments.size() == 2 && input_rows_count != 0)
+        {
+            const auto & col_epoch = *arguments[1].column;
+            epoch = col_epoch.getUInt(0);
+        }
+
+        auto col_res = ColumnUInt64::create(input_rows_count);
+        auto & res_data = col_res->getData();
+
+        /// timestamps in snowflake-ids are millisecond-based, convert input to milliseconds
+        UInt32 src_scale = getDecimalScale(*arguments[0].type);
+        Int64 multiplier_msec = DecimalUtils::scaleMultiplier<DateTime64>(3);
+        Int64 multiplier_src = DecimalUtils::scaleMultiplier<DateTime64>(src_scale);
+        auto factor = multiplier_msec / static_cast<double>(multiplier_src);
+
+        for (size_t i = 0; i < input_rows_count; ++i)
+            res_data[i] = static_cast<UInt64>(src_data[i] * factor - epoch) << time_shift;
+
+        return col_res;
+    }
+};
+
+REGISTER_FUNCTION(DateTimeToSnowflakeID)
+{
+    {
+        FunctionDocumentation::Description description = R"(Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.)";
+        FunctionDocumentation::Syntax syntax = "dateTimeToSnowflakeID(value[, epoch])";
+        FunctionDocumentation::Arguments arguments = {
+            {"value", "Date with time. [DateTime](../data-types/datetime.md)."},
+            {"epoch", "Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2015-01-01), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md)"}
+        };
+        FunctionDocumentation::ReturnedValue returned_value = "Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.";
+        FunctionDocumentation::Examples examples = {{"simple", "SELECT dateTimeToSnowflakeID(toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai'))", "6832626392367104000"}};
+        FunctionDocumentation::Categories categories = {"Snowflake ID"};
+
+        factory.registerFunction<FunctionDateTimeToSnowflakeID>({description, syntax, arguments, returned_value, examples, categories});
+    }
+
+    {
+        FunctionDocumentation::Description description = R"(Converts a [DateTime64](../data-types/datetime64.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.)";
+        FunctionDocumentation::Syntax syntax = "dateTime64ToSnowflakeID(value[, epoch])";
+        FunctionDocumentation::Arguments arguments = {
+            {"value", "Date with time. [DateTime64](../data-types/datetime.md)."},
+            {"epoch", "Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2015-01-01), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md)"}
+        };
+        FunctionDocumentation::ReturnedValue returned_value = "Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.";
+        FunctionDocumentation::Examples examples = {{"simple", "SELECT dateTime64ToSnowflakeID(toDateTime64('2021-08-15 18:57:56', 3, 'Asia/Shanghai'))", "6832626394434895872"}};
+        FunctionDocumentation::Categories categories = {"Snowflake ID"};
+
+        factory.registerFunction<FunctionDateTime64ToSnowflakeID>({description, syntax, arguments, returned_value, examples, categories});
+    }
+}
+
+}
@@ -207,7 +207,7 @@ public:

 REGISTER_FUNCTION(GenerateSnowflakeID)
 {
-    FunctionDocumentation::Description description = R"(Generates a Snowflake ID. The generated Snowflake ID contains the current Unix timestamp in milliseconds 41 (+ 1 top zero bit) bits, followed by machine id (10 bits), a counter (12 bits) to distinguish IDs within a millisecond. For any given timestamp (unix_ts_ms), the counter starts at 0 and is incremented by 1 for each new Snowflake ID until the timestamp changes. In case the counter overflows, the timestamp field is incremented by 1 and the counter is reset to 0. Function generateSnowflakeID guarantees that the counter field within a timestamp increments monotonically across all function invocations in concurrently running threads and queries.)";
+    FunctionDocumentation::Description description = R"(Generates a Snowflake ID. The generated Snowflake ID contains the current Unix timestamp in milliseconds (41 + 1 top zero bits), followed by a machine id (10 bits), and a counter (12 bits) to distinguish IDs within a millisecond. For any given timestamp (unix_ts_ms), the counter starts at 0 and is incremented by 1 for each new Snowflake ID until the timestamp changes. In case the counter overflows, the timestamp field is incremented by 1 and the counter is reset to 0. Function generateSnowflakeID guarantees that the counter field within a timestamp increments monotonically across all function invocations in concurrently running threads and queries.)";
     FunctionDocumentation::Syntax syntax = "generateSnowflakeID([expression])";
     FunctionDocumentation::Arguments arguments = {{"expression", "The expression is used to bypass common subexpression elimination if the function is called multiple times in a query but otherwise ignored. Optional."}};
     FunctionDocumentation::ReturnedValue returned_value = "A value of type UInt64";

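To make the documented layout concrete, here is a standalone sketch (plain C++, not part of the commit) that splits an ID into the three fields named in the description; the example constant is the one used in the snowflakeIDToDateTime documentation later in this commit:

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t id = 7204436857747984384ULL;
    uint64_t unix_ts_ms = id >> 22;           // top 41 (+ 1 zero) bits: milliseconds since epoch
    uint64_t machine_id = (id >> 12) & 0x3FF; // next 10 bits
    uint64_t counter    = id & 0xFFF;         // low 12 bits, restarts every millisecond
    std::printf("ts_ms=%llu machine=%llu counter=%llu\n",
                (unsigned long long)unix_ts_ms,
                (unsigned long long)machine_id,
                (unsigned long long)counter);
    return 0;
}
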
@@ -11,11 +11,17 @@
 #include <Interpreters/Context.h>

+
+/// ------------------------------------------------------------------------------------------------------------------------------
+/// The functions in this file are deprecated and should be removed in favor of functions 'snowflakeIDToDateTime[64]' and
+/// 'dateTime[64]ToSnowflakeID' by summer 2025. Please also mark setting `uniform_snowflake_conversion_functions` as obsolete then.
+/// ------------------------------------------------------------------------------------------------------------------------------
+
 namespace DB
 {

 namespace ErrorCodes
 {
+    extern const int DEPRECATED_FUNCTION;
     extern const int ILLEGAL_TYPE_OF_ARGUMENT;
 }

@@ -34,10 +40,19 @@ constexpr int time_shift = 22;
 class FunctionDateTimeToSnowflake : public IFunction
 {
 private:
-    const char * name;
+    const bool uniform_snowflake_conversion_functions;

 public:
-    explicit FunctionDateTimeToSnowflake(const char * name_) : name(name_) { }
+    static constexpr auto name = "dateTimeToSnowflake";
+
+    static FunctionPtr create(ContextPtr context)
+    {
+        return std::make_shared<FunctionDateTimeToSnowflake>(context);
+    }
+
+    explicit FunctionDateTimeToSnowflake(ContextPtr context)
+        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
+    {}

     String getName() const override { return name; }
     size_t getNumberOfArguments() const override { return 1; }
@@ -56,6 +71,9 @@ public:

     ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
     {
+        if (uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::DEPRECATED_FUNCTION, "Function {} is deprecated, to enable it disable setting 'uniform_snowflake_conversion_functions'", getName());
+
         const auto & src = arguments[0];
         const auto & src_column = *src.column;

@@ -73,13 +91,20 @@ public:
 class FunctionSnowflakeToDateTime : public IFunction
 {
 private:
-    const char * name;
     const bool allow_nonconst_timezone_arguments;
+    const bool uniform_snowflake_conversion_functions;

 public:
-    explicit FunctionSnowflakeToDateTime(const char * name_, ContextPtr context)
-        : name(name_)
-        , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments)
+    static constexpr auto name = "snowflakeToDateTime";
+
+    static FunctionPtr create(ContextPtr context)
+    {
+        return std::make_shared<FunctionSnowflakeToDateTime>(context);
+    }
+
+    explicit FunctionSnowflakeToDateTime(ContextPtr context)
+        : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments)
+        , uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
     {}

     String getName() const override { return name; }
@@ -107,6 +132,9 @@ public:

     ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
     {
+        if (uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::DEPRECATED_FUNCTION, "Function {} is deprecated, to enable it disable setting 'uniform_snowflake_conversion_functions'", getName());
+
         const auto & src = arguments[0];
         const auto & src_column = *src.column;

@@ -138,10 +166,19 @@ public:
 class FunctionDateTime64ToSnowflake : public IFunction
 {
 private:
-    const char * name;
+    const bool uniform_snowflake_conversion_functions;

 public:
-    explicit FunctionDateTime64ToSnowflake(const char * name_) : name(name_) { }
+    static constexpr auto name = "dateTime64ToSnowflake";
+
+    static FunctionPtr create(ContextPtr context)
+    {
+        return std::make_shared<FunctionDateTime64ToSnowflake>(context);
+    }
+
+    explicit FunctionDateTime64ToSnowflake(ContextPtr context)
+        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
+    {}

     String getName() const override { return name; }
     size_t getNumberOfArguments() const override { return 1; }
@@ -160,6 +197,9 @@ public:

     ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
     {
+        if (uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::DEPRECATED_FUNCTION, "Function {} is deprecated, to enable it disable setting 'uniform_snowflake_conversion_functions'", getName());
+
         const auto & src = arguments[0];

         const auto & src_column = *src.column;
@@ -185,13 +225,20 @@ public:
 class FunctionSnowflakeToDateTime64 : public IFunction
 {
 private:
-    const char * name;
     const bool allow_nonconst_timezone_arguments;
+    const bool uniform_snowflake_conversion_functions;

 public:
-    explicit FunctionSnowflakeToDateTime64(const char * name_, ContextPtr context)
-        : name(name_)
-        , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments)
+    static constexpr auto name = "snowflakeToDateTime64";
+
+    static FunctionPtr create(ContextPtr context)
+    {
+        return std::make_shared<FunctionSnowflakeToDateTime64>(context);
+    }
+
+    explicit FunctionSnowflakeToDateTime64(ContextPtr context)
+        : allow_nonconst_timezone_arguments(context->getSettingsRef().allow_nonconst_timezone_arguments)
+        , uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
     {}

     String getName() const override { return name; }
@@ -219,6 +266,9 @@ public:

     ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
     {
+        if (uniform_snowflake_conversion_functions)
+            throw Exception(ErrorCodes::DEPRECATED_FUNCTION, "Function {} is deprecated, to enable it disable setting 'uniform_snowflake_conversion_functions'", getName());
+
         const auto & src = arguments[0];
         const auto & src_column = *src.column;

@@ -246,27 +296,12 @@ public:

 }

-REGISTER_FUNCTION(DateTimeToSnowflake)
+REGISTER_FUNCTION(LegacySnowflakeConversion)
 {
-    factory.registerFunction("dateTimeToSnowflake",
-        [](ContextPtr){ return std::make_shared<FunctionDateTimeToSnowflake>("dateTimeToSnowflake"); });
-}
-
-REGISTER_FUNCTION(DateTime64ToSnowflake)
-{
-    factory.registerFunction("dateTime64ToSnowflake",
-        [](ContextPtr){ return std::make_shared<FunctionDateTime64ToSnowflake>("dateTime64ToSnowflake"); });
-}
-
-REGISTER_FUNCTION(SnowflakeToDateTime)
-{
-    factory.registerFunction("snowflakeToDateTime",
-        [](ContextPtr context){ return std::make_shared<FunctionSnowflakeToDateTime>("snowflakeToDateTime", context); });
-}
-
-REGISTER_FUNCTION(SnowflakeToDateTime64)
-{
-    factory.registerFunction("snowflakeToDateTime64",
-        [](ContextPtr context){ return std::make_shared<FunctionSnowflakeToDateTime64>("snowflakeToDateTime64", context); });
-}
+    factory.registerFunction<FunctionSnowflakeToDateTime>();
+    factory.registerFunction<FunctionSnowflakeToDateTime64>();
+    factory.registerFunction<FunctionDateTimeToSnowflake>();
+    factory.registerFunction<FunctionDateTime64ToSnowflake>();
+}

 }
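All four legacy classes above follow the same gating pattern: the setting is captured once, at function-creation time, and executeImpl() refuses to run while the new uniform functions are enabled. A minimal sketch of that pattern, with a toy settings struct standing in for ClickHouse's Context machinery (names are illustrative):

#include <stdexcept>

struct ToySettings { bool uniform_snowflake_conversion_functions = true; };

class LegacyFunction
{
    const bool uniform; // captured once at creation, like the classes above
public:
    explicit LegacyFunction(const ToySettings & s)
        : uniform(s.uniform_snowflake_conversion_functions) {}

    void execute() const
    {
        if (uniform)
            throw std::runtime_error("deprecated: disable 'uniform_snowflake_conversion_functions' to use this function");
        // ... legacy conversion would run here ...
    }
};
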
217 src/Functions/snowflakeIDToDateTime.cpp Normal file
@@ -0,0 +1,217 @@
#include <Functions/FunctionFactory.h>
#include <Functions/extractTimeZoneFromFunctionArguments.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionHelpers.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypesDecimal.h>
#include <DataTypes/DataTypesNumber.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsDateTime.h>
#include <Columns/ColumnsNumber.h>
#include <Core/DecimalFunctions.h>
#include <Interpreters/Context.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int ILLEGAL_TYPE_OF_ARGUMENT;
    extern const int UNKNOWN_FUNCTION;
}

namespace
{

/// See generateSnowflakeID.cpp
constexpr int time_shift = 22;

}

class FunctionSnowflakeIDToDateTime : public IFunction
{
private:
    const bool uniform_snowflake_conversion_functions;
    const bool allow_nonconst_timezone_arguments;

public:
    static constexpr auto name = "snowflakeIDToDateTime";

    static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionSnowflakeIDToDateTime>(context); }
    explicit FunctionSnowflakeIDToDateTime(ContextPtr context)
        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
        , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments)
    {}

    String getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 0; }
    bool isVariadic() const override { return true; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        FunctionArgumentDescriptors args{
            {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isUInt64), nullptr, "UInt64"}
        };
        FunctionArgumentDescriptors optional_args{
            {"epoch", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeUInt), isColumnConst, "UInt*"},
            {"time_zone", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"}
        };
        validateFunctionArgumentTypes(*this, arguments, args, optional_args);

        String timezone;
        if (arguments.size() == 3)
            timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, allow_nonconst_timezone_arguments);

        return std::make_shared<DataTypeDateTime>(timezone);
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        if (!uniform_snowflake_conversion_functions)
            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "To use function {}, setting 'uniform_snowflake_conversion_functions' must be enabled", getName());

        const auto & col_src = *arguments[0].column;

        size_t epoch = 0;
        if (arguments.size() >= 2 && input_rows_count != 0)
        {
            const auto & col_epoch = *arguments[1].column;
            epoch = col_epoch.getUInt(0);
        }

        auto col_res = ColumnDateTime::create(input_rows_count);
        auto & res_data = col_res->getData();

        if (const auto * col_src_non_const = typeid_cast<const ColumnUInt64 *>(&col_src))
        {
            const auto & src_data = col_src_non_const->getData();
            for (size_t i = 0; i < input_rows_count; ++i)
                res_data[i] = static_cast<UInt32>(((src_data[i] >> time_shift) + epoch) / 1000);
        }
        else if (const auto * col_src_const = typeid_cast<const ColumnConst *>(&col_src))
        {
            UInt64 src_val = col_src_const->getValue<UInt64>();
            for (size_t i = 0; i < input_rows_count; ++i)
                res_data[i] = static_cast<UInt32>(((src_val >> time_shift) + epoch) / 1000);
        }
        else
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name);

        return col_res;
    }
};

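A worked example of the extraction performed in executeImpl() above, assuming the default epoch of 0; the constant is the example value used in the documentation block later in this file, and DateTime's second precision is why the millisecond value is divided by 1000:

#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t id = 7204436857747984384ULL;  // documented example ID
    uint64_t epoch = 0;                    // a custom epoch is added back in milliseconds
    uint32_t seconds = (uint32_t)(((id >> 22) + epoch) / 1000); // DateTime has second precision
    std::printf("%u\n", seconds);          // Unix seconds for 2024-06-06 10:59:58
    return 0;
}
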
class FunctionSnowflakeIDToDateTime64 : public IFunction
{
private:
    const bool uniform_snowflake_conversion_functions;
    const bool allow_nonconst_timezone_arguments;

public:
    static constexpr auto name = "snowflakeIDToDateTime64";

    static FunctionPtr create(ContextPtr context) { return std::make_shared<FunctionSnowflakeIDToDateTime64>(context); }
    explicit FunctionSnowflakeIDToDateTime64(ContextPtr context)
        : uniform_snowflake_conversion_functions(context->getSettingsRef().uniform_snowflake_conversion_functions)
        , allow_nonconst_timezone_arguments(context->getSettings().allow_nonconst_timezone_arguments)
    {}

    String getName() const override { return name; }
    size_t getNumberOfArguments() const override { return 0; }
    bool isVariadic() const override { return true; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }

    DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
    {
        FunctionArgumentDescriptors args{
            {"value", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isUInt64), nullptr, "UInt64"}
        };
        FunctionArgumentDescriptors optional_args{
            {"epoch", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isNativeUInt), isColumnConst, "UInt*"},
            {"time_zone", static_cast<FunctionArgumentDescriptor::TypeValidator>(&isString), nullptr, "String"}
        };
        validateFunctionArgumentTypes(*this, arguments, args, optional_args);

        String timezone;
        if (arguments.size() == 3)
            timezone = extractTimeZoneNameFromFunctionArguments(arguments, 2, 0, allow_nonconst_timezone_arguments);

        return std::make_shared<DataTypeDateTime64>(3, timezone);
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
    {
        if (!uniform_snowflake_conversion_functions)
            throw Exception(ErrorCodes::UNKNOWN_FUNCTION, "To use function {}, setting 'uniform_snowflake_conversion_functions' must be enabled", getName());

        const auto & col_src = *arguments[0].column;

        size_t epoch = 0;
        if (arguments.size() >= 2 && input_rows_count != 0)
        {
            const auto & col_epoch = *arguments[1].column;
            epoch = col_epoch.getUInt(0);
        }

        auto col_res = ColumnDateTime64::create(input_rows_count, 3);
        auto & res_data = col_res->getData();

        if (const auto * col_src_non_const = typeid_cast<const ColumnUInt64 *>(&col_src))
        {
            const auto & src_data = col_src_non_const->getData();
            for (size_t i = 0; i < input_rows_count; ++i)
                res_data[i] = (src_data[i] >> time_shift) + epoch;
        }
        else if (const auto * col_src_const = typeid_cast<const ColumnConst *>(&col_src))
        {
            UInt64 src_val = col_src_const->getValue<UInt64>();
            for (size_t i = 0; i < input_rows_count; ++i)
                res_data[i] = (src_val >> time_shift) + epoch;
        }
        else
            throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal argument for function {}", name);

        return col_res;
    }
};

REGISTER_FUNCTION(SnowflakeIDToDateTime)
{
    {
        FunctionDocumentation::Description description = R"(Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime](../data-types/datetime.md).)";
        FunctionDocumentation::Syntax syntax = "snowflakeIDToDateTime(value[, epoch[, time_zone]])";
        FunctionDocumentation::Arguments arguments = {
            {"value", "Snowflake ID. [UInt64](../data-types/int-uint.md)"},
            {"epoch", "Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md)"},
            {"time_zone", "[Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone) for the returned value. Optional. [String](../data-types/string.md)"}
        };
        FunctionDocumentation::ReturnedValue returned_value = "The timestamp component of `value` as a [DateTime](../data-types/datetime.md) value.";
        FunctionDocumentation::Examples examples = {{"simple", "SELECT snowflakeIDToDateTime(7204436857747984384)", "2024-06-06 10:59:58"}};
        FunctionDocumentation::Categories categories = {"Snowflake ID"};

        factory.registerFunction<FunctionSnowflakeIDToDateTime>({description, syntax, arguments, returned_value, examples, categories});
    }

    {
        FunctionDocumentation::Description description = R"(Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime64](../data-types/datetime64.md).)";
        FunctionDocumentation::Syntax syntax = "snowflakeIDToDateTime64(value[, epoch[, time_zone]])";
        FunctionDocumentation::Arguments arguments = {
            {"value", "Snowflake ID. [UInt64](../data-types/int-uint.md)"},
            {"epoch", "Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md)"},
            {"time_zone", "[Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone) for the returned value. Optional. [String](../data-types/string.md)"}
        };
        FunctionDocumentation::ReturnedValue returned_value = "The timestamp component of `value` as a [DateTime64](../data-types/datetime64.md) with scale = 3, i.e. millisecond precision.";
        FunctionDocumentation::Examples examples = {{"simple", "SELECT snowflakeIDToDateTime64(7204436857747984384)", "2024-06-06 10:59:58"}};
        FunctionDocumentation::Categories categories = {"Snowflake ID"};

        factory.registerFunction<FunctionSnowflakeIDToDateTime64>({description, syntax, arguments, returned_value, examples, categories});
    }
}

}

@@ -30,10 +30,6 @@
 #include <base/sleep.h>


-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/lsan_interface.h>
-#endif
-
 namespace ProfileEvents
 {
     extern const Event S3WriteRequestsErrors;
@@ -880,14 +876,7 @@ void ClientCacheRegistry::clearCacheForAll()
 ClientFactory::ClientFactory()
 {
     aws_options = Aws::SDKOptions{};
-    {
-#ifdef ADDRESS_SANITIZER
-        /// Leak sanitizer (part of address sanitizer) thinks that memory in OpenSSL (called by AWS SDK) is allocated but not
-        /// released. Actually, the memory is released at the end of the program (ClientFactory is a singleton, see the dtor).
-        __lsan::ScopedDisabler lsan_disabler;
-#endif
-        Aws::InitAPI(aws_options);
-    }
+    Aws::InitAPI(aws_options);
     Aws::Utils::Logging::InitializeAWSLogging(std::make_shared<AWSLogger>(false));
     Aws::Http::SetHttpClientFactory(std::make_shared<PocoHTTPClientFactory>());
 }

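For reference, the suppression pattern this hunk removes looks like the sketch below; __lsan::ScopedDisabler comes from the public sanitizer interface header, while ThirdPartyInit() is a hypothetical stand-in for a call (such as the OpenSSL allocations mentioned in the deleted comment) whose one-time allocations would otherwise be reported as leaks:

#ifdef ADDRESS_SANITIZER
#    include <sanitizer/lsan_interface.h>
#endif

void initWithLeakSuppressed()
{
#ifdef ADDRESS_SANITIZER
    __lsan::ScopedDisabler lsan_disabler; // leak checking is off while this object is alive
#endif
    // ThirdPartyInit(); // hypothetical init call whose allocations live until process exit
}
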
@@ -535,7 +535,7 @@ void PocoHTTPClient::makeRequestInternalImpl(
     const static std::string_view needle = "<Error>";
     if (auto it = std::search(response_string.begin(), response_string.end(), std::default_searcher(needle.begin(), needle.end())); it != response_string.end())
     {
-        LOG_WARNING(log, "Response for request contain <Error> tag in body, settings internal server error (500 code)");
+        LOG_WARNING(log, "Response for the request contains an <Error> tag in the body, will treat it as an internal server error (code 500)");
         response->SetResponseCode(Aws::Http::HttpResponseCode::INTERNAL_SERVER_ERROR);

         addMetric(request, S3MetricType::Errors);

@@ -118,7 +118,7 @@ namespace
     /// Checks if the current user has enough access rights granted with grant option to grant or revoke specified access rights.
     void checkGrantOption(
         const AccessControl & access_control,
-        const ContextAccess & current_user_access,
+        const ContextAccessWrapper & current_user_access,
         const std::vector<UUID> & grantees_from_query,
         bool & need_check_grantees_are_allowed,
         const AccessRightsElements & elements_to_grant,
@@ -200,7 +200,7 @@ namespace
     /// Checks if the current user has enough roles granted with admin option to grant or revoke specified roles.
     void checkAdminOption(
         const AccessControl & access_control,
-        const ContextAccess & current_user_access,
+        const ContextAccessWrapper & current_user_access,
         const std::vector<UUID> & grantees_from_query,
         bool & need_check_grantees_are_allowed,
         const std::vector<UUID> & roles_to_grant,
@@ -277,7 +277,7 @@ namespace
     /// This function is less accurate than checkAdminOption() because it cannot use any information about
     /// granted roles the grantees currently have (due to those grantees are located on multiple nodes,
     /// we just don't have the full information about them).
-    void checkAdminOptionForExecutingOnCluster(const ContextAccess & current_user_access,
+    void checkAdminOptionForExecutingOnCluster(const ContextAccessWrapper & current_user_access,
         const std::vector<UUID> roles_to_grant,
         const RolesOrUsersSet & roles_to_revoke)
     {
@@ -376,7 +376,7 @@ namespace
     /// Calculates all available rights to grant with current user intersection.
     void calculateCurrentGrantRightsWithIntersection(
         AccessRights & rights,
-        std::shared_ptr<const ContextAccess> current_user_access,
+        std::shared_ptr<const ContextAccessWrapper> current_user_access,
         const AccessRightsElements & elements_to_grant)
     {
         AccessRightsElements current_user_grantable_elements;

@@ -4,6 +4,7 @@
 #include <Common/typeid_cast.h>
 #include <Common/FieldVisitorsAccurateComparison.h>
 #include <Common/checkStackSize.h>
+#include <Common/assert_cast.h>

 #include <Core/ColumnNumbers.h>
 #include <Core/ColumnWithTypeAndName.h>
@@ -102,7 +103,7 @@ static size_t getTypeDepth(const DataTypePtr & type)
 /// 33.33 in the set is converted to 33.3, but it is not equal to 33.3 in the column, so the result should still be empty.
 /// We can not include values that don't represent any possible value from the type of filtered column to the set.
 template<typename Collection>
-static Block createBlockFromCollection(const Collection & collection, const DataTypes & types, bool transform_null_in)
+static Block createBlockFromCollection(const Collection & collection, const DataTypes & value_types, const DataTypes & types, bool transform_null_in)
 {
     size_t columns_num = types.size();
     MutableColumns columns(columns_num);
@@ -113,11 +114,12 @@ static Block createBlockFromCollection(const Collection & collection, const Data
     }

     Row tuple_values;
-    for (const auto & value : collection)
+    for (size_t collection_index = 0; collection_index < collection.size(); ++collection_index)
     {
+        const auto & value = collection[collection_index];
         if (columns_num == 1)
         {
-            auto field = convertFieldToTypeStrict(value, *types[0]);
+            auto field = convertFieldToTypeStrict(value, *value_types[collection_index], *types[0]);
             bool need_insert_null = transform_null_in && types[0]->isNullable();
             if (field && (!field->isNull() || need_insert_null))
                 columns[0]->insert(*field);
@@ -130,7 +132,6 @@ static Block createBlockFromCollection(const Collection & collection, const Data

             const auto & tuple = value.template get<const Tuple &>();
             size_t tuple_size = tuple.size();
-
             if (tuple_size != columns_num)
                 throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Incorrect size of tuple in set: {} instead of {}",
                     tuple_size, columns_num);
@@ -138,10 +139,13 @@ static Block createBlockFromCollection(const Collection & collection, const Data
             if (tuple_values.empty())
                 tuple_values.resize(tuple_size);

+            const DataTypePtr & value_type = value_types[collection_index];
+            const DataTypes & tuple_value_type = typeid_cast<const DataTypeTuple *>(value_type.get())->getElements();
+
             size_t i = 0;
             for (; i < tuple_size; ++i)
             {
-                auto converted_field = convertFieldToTypeStrict(tuple[i], *types[i]);
+                auto converted_field = convertFieldToTypeStrict(tuple[i], *tuple_value_type[i], *types[i]);
                 if (!converted_field)
                     break;
                 tuple_values[i] = std::move(*converted_field);
@@ -317,16 +321,25 @@ Block createBlockForSet(
     if (left_type_depth == right_type_depth)
     {
         Array array{right_arg_value};
-        block = createBlockFromCollection(array, set_element_types, tranform_null_in);
+        DataTypes value_types{right_arg_type};
+        block = createBlockFromCollection(array, value_types, set_element_types, tranform_null_in);
     }
     /// 1 in (1, 2); (1, 2) in ((1, 2), (3, 4)); etc.
     else if (left_type_depth + 1 == right_type_depth)
     {
         auto type_index = right_arg_type->getTypeId();
         if (type_index == TypeIndex::Tuple)
-            block = createBlockFromCollection(right_arg_value.get<const Tuple &>(), set_element_types, tranform_null_in);
+        {
+            const DataTypes & value_types = assert_cast<const DataTypeTuple *>(right_arg_type.get())->getElements();
+            block = createBlockFromCollection(right_arg_value.get<const Tuple &>(), value_types, set_element_types, tranform_null_in);
+        }
         else if (type_index == TypeIndex::Array)
-            block = createBlockFromCollection(right_arg_value.get<const Array &>(), set_element_types, tranform_null_in);
+        {
+            const auto * right_arg_array_type = assert_cast<const DataTypeArray *>(right_arg_type.get());
+            size_t right_arg_array_size = right_arg_value.get<const Array &>().size();
+            DataTypes value_types(right_arg_array_size, right_arg_array_type->getNestedType());
+            block = createBlockFromCollection(right_arg_value.get<const Array &>(), value_types, set_element_types, tranform_null_in);
+        }
         else
             throw_unsupported_type(right_arg_type);
     }

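The 33.33-vs-33.3 comment above is the motivation for threading value_types through createBlockFromCollection(): conversion into the set must be strict, dropping literals that the column type cannot represent exactly instead of rounding them. A minimal integer-only sketch of that rule (not the ClickHouse Field API):

#include <cstdio>
#include <optional>

// A set literal arrives as a Decimal with scale 2 (e.g. 3333 meaning 33.33)
// while the filtered column has scale 1. Strict conversion keeps the value
// only if no digits are lost.
std::optional<long long> rescale2to1(long long scaled2)
{
    if (scaled2 % 10 != 0)
        return std::nullopt;   // 3333 (33.33) loses a digit -> excluded from the set
    return scaled2 / 10;       // 3330 (33.30) -> 333 (33.3), exact -> kept
}

int main()
{
    std::printf("%d %d\n", rescale2to1(3333).has_value(), rescale2to1(3330).has_value()); // 0 1
    return 0;
}
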
@@ -281,6 +281,8 @@ struct ContextSharedPart : boost::noncopyable
     String default_profile_name;    /// Default profile name used for default values.
     String system_profile_name;     /// Profile used by system processes
     String buffer_profile_name;     /// Profile used by Buffer engine for flushing to the underlying
+    String merge_workload TSA_GUARDED_BY(mutex);       /// Workload setting value that is used by all merges
+    String mutation_workload TSA_GUARDED_BY(mutex);    /// Workload setting value that is used by all mutations
     std::unique_ptr<AccessControl> access_control TSA_GUARDED_BY(mutex);
     mutable OnceFlag resource_manager_initialized;
     mutable ResourceManagerPtr resource_manager;
@@ -833,6 +835,7 @@ ContextMutablePtr Context::createGlobal(ContextSharedPart * shared_part)
     auto res = std::shared_ptr<Context>(new Context);
     res->shared = shared_part;
     res->query_access_info = std::make_shared<QueryAccessInfo>();
+    res->query_privileges_info = std::make_shared<QueryPrivilegesInfo>();
     return res;
 }

@@ -1425,7 +1428,7 @@
 void Context::checkAccess(const AccessRightsElement & element) const { checkAccessImpl(element); }
 void Context::checkAccess(const AccessRightsElements & elements) const { checkAccessImpl(elements); }

-std::shared_ptr<const ContextAccess> Context::getAccess() const
+std::shared_ptr<const ContextAccessWrapper> Context::getAccess() const
 {
     /// A helper function to collect parameters for calculating access rights, called with Context::getLocalSharedLock() acquired.
     auto get_params = [this]()
@@ -1442,14 +1445,14 @@ std::shared_ptr<const ContextAccess> Context::getAccess() const
     {
         SharedLockGuard lock(mutex);
         if (access && !need_recalculate_access)
-            return access; /// No need to recalculate access rights.
+            return std::make_shared<const ContextAccessWrapper>(access, shared_from_this()); /// No need to recalculate access rights.

         params.emplace(get_params());

         if (access && (access->getParams() == *params))
         {
             need_recalculate_access = false;
-            return access; /// No need to recalculate access rights.
+            return std::make_shared<const ContextAccessWrapper>(access, shared_from_this()); /// No need to recalculate access rights.
         }
     }

@@ -1469,7 +1472,7 @@ std::shared_ptr<const ContextAccess> Context::getAccess() const
         }
     }

-    return res;
+    return std::make_shared<const ContextAccessWrapper>(res, shared_from_this());
 }

 RowPolicyFilterPtr Context::getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const
@@ -1561,11 +1564,36 @@ ResourceManagerPtr Context::getResourceManager() const
 ClassifierPtr Context::getWorkloadClassifier() const
 {
     std::lock_guard lock(mutex);
+    // NOTE: Workload cannot be changed after query start, and getWorkloadClassifier() should not be called before proper `workload` is set
     if (!classifier)
         classifier = getResourceManager()->acquire(getSettingsRef().workload);
     return classifier;
 }

+String Context::getMergeWorkload() const
+{
+    SharedLockGuard lock(shared->mutex);
+    return shared->merge_workload;
+}
+
+void Context::setMergeWorkload(const String & value)
+{
+    std::lock_guard lock(shared->mutex);
+    shared->merge_workload = value;
+}
+
+String Context::getMutationWorkload() const
+{
+    SharedLockGuard lock(shared->mutex);
+    return shared->mutation_workload;
+}
+
+void Context::setMutationWorkload(const String & value)
+{
+    std::lock_guard lock(shared->mutex);
+    shared->mutation_workload = value;
+}
+

 Scalars Context::getScalars() const
 {
@@ -1830,6 +1858,15 @@ void Context::addQueryFactoriesInfo(QueryLogFactories factory_type, const String
     }
 }

+void Context::addQueryPrivilegesInfo(const String & privilege, bool granted) const
+{
+    std::lock_guard lock(query_privileges_info->mutex);
+    if (granted)
+        query_privileges_info->used_privileges.emplace(privilege);
+    else
+        query_privileges_info->missing_privileges.emplace(privilege);
+}
+
 static bool findIdentifier(const ASTFunction * function)
 {
     if (!function || !function->arguments)
@@ -2511,6 +2548,21 @@ void Context::makeQueryContext()
     local_read_query_throttler.reset();
     local_write_query_throttler.reset();
     backups_query_throttler.reset();
+    query_privileges_info = std::make_shared<QueryPrivilegesInfo>(*query_privileges_info);
+}
+
+void Context::makeQueryContextForMerge(const MergeTreeSettings & merge_tree_settings)
+{
+    makeQueryContext();
+    classifier.reset(); // It is assumed that there are no active queries running using this classifier, otherwise this will lead to crashes
+    settings.workload = merge_tree_settings.merge_workload.value.empty() ? getMergeWorkload() : merge_tree_settings.merge_workload;
+}
+
+void Context::makeQueryContextForMutate(const MergeTreeSettings & merge_tree_settings)
+{
+    makeQueryContext();
+    classifier.reset(); // It is assumed that there are no active queries running using this classifier, otherwise this will lead to crashes
+    settings.workload = merge_tree_settings.mutation_workload.value.empty() ? getMutationWorkload() : merge_tree_settings.mutation_workload;
 }

 void Context::makeSessionContext()

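makeQueryContextForMerge() and makeQueryContextForMutate() above resolve the workload with the same precedence rule: a non-empty per-table MergeTree setting wins, otherwise the server-wide value installed via setMergeWorkload()/setMutationWorkload() is used. The rule reduces to a one-liner (standalone sketch):

#include <string>

std::string resolveWorkload(const std::string & table_setting, const std::string & server_setting)
{
    return table_setting.empty() ? server_setting : table_setting; // per-table value overrides server-wide
}
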
@@ -50,6 +50,7 @@ class ASTSelectQuery;

 struct ContextSharedPart;
 class ContextAccess;
+class ContextAccessWrapper;
 struct User;
 using UserPtr = std::shared_ptr<const User>;
 struct SettingsProfilesInfo;
@@ -405,9 +406,31 @@ public:
     mutable std::mutex mutex;
 };

+struct QueryPrivilegesInfo
+{
+    QueryPrivilegesInfo() = default;
+
+    QueryPrivilegesInfo(const QueryPrivilegesInfo & rhs)
+    {
+        std::lock_guard<std::mutex> lock(rhs.mutex);
+        used_privileges = rhs.used_privileges;
+        missing_privileges = rhs.missing_privileges;
+    }
+
+    QueryPrivilegesInfo(QueryPrivilegesInfo && rhs) = delete;
+
+    std::unordered_set<std::string> used_privileges TSA_GUARDED_BY(mutex);
+    std::unordered_set<std::string> missing_privileges TSA_GUARDED_BY(mutex);
+
+    mutable std::mutex mutex;
+};
+
+using QueryPrivilegesInfoPtr = std::shared_ptr<QueryPrivilegesInfo>;
+
 protected:
     /// Needs to be changed while having const context in factories methods
     mutable QueryFactoriesInfo query_factories_info;
+    QueryPrivilegesInfoPtr query_privileges_info;
     /// Query metrics for reading data asynchronously with IAsynchronousReader.
     mutable std::shared_ptr<AsyncReadCounters> async_read_counters;

@@ -616,7 +639,7 @@ public:
     void checkAccess(const AccessRightsElement & element) const;
     void checkAccess(const AccessRightsElements & elements) const;

-    std::shared_ptr<const ContextAccess> getAccess() const;
+    std::shared_ptr<const ContextAccessWrapper> getAccess() const;

     RowPolicyFilterPtr getRowPolicyFilter(const String & database, const String & table_name, RowPolicyFilterType filter_type) const;

@@ -626,6 +649,10 @@ public:
     /// Resource management related
     ResourceManagerPtr getResourceManager() const;
     ClassifierPtr getWorkloadClassifier() const;
+    String getMergeWorkload() const;
+    void setMergeWorkload(const String & value);
+    String getMutationWorkload() const;
+    void setMutationWorkload(const String & value);

     /// We have to copy external tables inside executeQuery() to track limits. Therefore, set callback for it. Must set once.
     void setExternalTablesInitializer(ExternalTablesInitializer && initializer);
@@ -741,6 +768,10 @@ public:
     QueryFactoriesInfo getQueryFactoriesInfo() const;
     void addQueryFactoriesInfo(QueryLogFactories factory_type, const String & created_object) const;

+    const QueryPrivilegesInfo & getQueryPrivilegesInfo() const { return *getQueryPrivilegesInfoPtr(); }
+    QueryPrivilegesInfoPtr getQueryPrivilegesInfoPtr() const { return query_privileges_info; }
+    void addQueryPrivilegesInfo(const String & privilege, bool granted) const;
+
     /// For table functions s3/file/url/hdfs/input we can use structure from
     /// insertion table depending on select expression.
     StoragePtr executeTableFunction(const ASTPtr & table_expression, const ASTSelectQuery * select_query_hint = nullptr);
@@ -911,6 +942,8 @@ public:
     void setSessionContext(ContextMutablePtr context_) { session_context = context_; }

     void makeQueryContext();
+    void makeQueryContextForMerge(const MergeTreeSettings & merge_tree_settings);
+    void makeQueryContextForMutate(const MergeTreeSettings & merge_tree_settings);
     void makeSessionContext();
     void makeGlobalContext();

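The copy constructor of QueryPrivilegesInfo above locks the source's mutex so makeQueryContext() can snapshot the sets while another thread may still be inserting into them; the move constructor is deleted because a move would bypass that lock. The same pattern in isolation (generic sketch, not ClickHouse code):

#include <mutex>
#include <string>
#include <unordered_set>

struct GuardedSet
{
    GuardedSet() = default;

    GuardedSet(const GuardedSet & rhs)
    {
        std::lock_guard<std::mutex> lock(rhs.mutex); // mutex is mutable, so locking a const source works
        items = rhs.items;                           // copied under the source's lock
    }

    GuardedSet(GuardedSet &&) = delete; // moving would sidestep the lock

    std::unordered_set<std::string> items;
    mutable std::mutex mutex;
};
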
@@ -129,6 +129,7 @@ public:
     static constexpr const char * SYSTEM_DATABASE = "system";
     static constexpr const char * INFORMATION_SCHEMA = "information_schema";
     static constexpr const char * INFORMATION_SCHEMA_UPPERCASE = "INFORMATION_SCHEMA";
+    static constexpr const char * DEFAULT_DATABASE = "default";

     /// Returns true if a passed name is one of the predefined databases' names.
     static bool isPredefinedDatabase(std::string_view database_name);

@@ -88,6 +88,11 @@
 #include <Interpreters/ReplaceQueryParameterVisitor.h>
 #include <Parsers/QueryParameterVisitor.h>

+namespace CurrentMetrics
+{
+    extern const Metric AttachedTable;
+}
+
 namespace DB
 {

@@ -113,6 +118,8 @@ namespace ErrorCodes
     extern const int UNKNOWN_STORAGE;
     extern const int SYNTAX_ERROR;
     extern const int SUPPORT_IS_DISABLED;
+    extern const int TOO_MANY_TABLES;
+    extern const int TOO_MANY_DATABASES;
 }

 namespace fs = std::filesystem;
@@ -138,6 +145,31 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create)
         throw Exception(ErrorCodes::DATABASE_ALREADY_EXISTS, "Database {} already exists.", database_name);
     }

+    auto db_num_limit = getContext()->getGlobalContext()->getServerSettings().max_database_num_to_throw;
+    if (db_num_limit > 0)
+    {
+        size_t db_count = DatabaseCatalog::instance().getDatabases().size();
+        std::vector<String> system_databases = {
+            DatabaseCatalog::TEMPORARY_DATABASE,
+            DatabaseCatalog::SYSTEM_DATABASE,
+            DatabaseCatalog::INFORMATION_SCHEMA,
+            DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE,
+            DatabaseCatalog::DEFAULT_DATABASE
+        };
+
+        for (const auto & system_database : system_databases)
+        {
+            if (db_count > 0 && DatabaseCatalog::instance().isDatabaseExist(system_database))
+                db_count--;
+        }
+
+        if (db_count >= db_num_limit)
+            throw Exception(ErrorCodes::TOO_MANY_DATABASES,
+                            "Too many databases in the ClickHouse server. "
+                            "The limit (setting 'max_database_num_to_throw') is set to {}, current number of databases is {}",
+                            db_num_limit, db_count);
+    }
+
     /// Will write file with database metadata, if needed.
     String database_name_escaped = escapeForFileName(database_name);
     fs::path metadata_path = fs::weakly_canonical(getContext()->getPath());
@@ -1543,6 +1575,17 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
         }
     }

+    UInt64 table_num_limit = getContext()->getGlobalContext()->getServerSettings().max_table_num_to_throw;
+    if (table_num_limit > 0 && create.getDatabase() != DatabaseCatalog::SYSTEM_DATABASE)
+    {
+        UInt64 table_count = CurrentMetrics::get(CurrentMetrics::AttachedTable);
+        if (table_count >= table_num_limit)
+            throw Exception(ErrorCodes::TOO_MANY_TABLES,
+                            "Too many tables in the ClickHouse server. "
+                            "The limit (setting 'max_table_num_to_throw') is set to {}, current number of tables is {}",
+                            table_num_limit, table_count);
+    }
+
     database->createTable(getContext(), create.getTable(), res, query_ptr);

     /// Move table data to the proper place. We do not move data earlier to avoid situations

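A worked reading of the database-limit check above: predefined databases are subtracted from the count before comparing against max_database_num_to_throw, so only user-created databases count toward the limit. A simplified sketch of the counting rule (the predefined names mirror the DatabaseCatalog constants; the temporary database name is quoted from memory and may differ):

#include <cstddef>
#include <set>
#include <string>

bool exceedsDatabaseLimit(std::set<std::string> databases, size_t limit)
{
    for (const auto * predefined : {"_temporary_and_external_tables", "system",
                                    "information_schema", "INFORMATION_SCHEMA", "default"})
        databases.erase(predefined); // predefined databases don't count toward the limit
    return limit > 0 && databases.size() >= limit;
}
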
@@ -26,7 +26,8 @@
 #include <Processors/Transforms/CountingTransform.h>
 #include <Processors/Transforms/ExpressionTransform.h>
 #include <Processors/Transforms/MaterializingTransform.h>
-#include <Processors/Transforms/SquashingChunksTransform.h>
+#include <Processors/Transforms/SquashingTransform.h>
+#include <Processors/Transforms/PlanSquashingTransform.h>
 #include <Processors/Transforms/getSourceFromASTInsertQuery.h>
 #include <Processors/QueryPlan/QueryPlan.h>
 #include <QueryPipeline/QueryPipelineBuilder.h>
@@ -625,9 +626,15 @@ BlockIO InterpreterInsertQuery::execute()
     {
         bool table_prefers_large_blocks = table->prefersLargeBlocks();

+        pipeline.addTransform(std::make_shared<PlanSquashingTransform>(
+            header,
+            table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
+            table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL,
+            presink_chains.size()));
+
         pipeline.addSimpleTransform([&](const Block & in_header) -> ProcessorPtr
         {
-            return std::make_shared<SimpleSquashingChunksTransform>(
+            return std::make_shared<ApplySquashingTransform>(
                 in_header,
                 table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
                 table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL);
@@ -683,12 +690,20 @@ BlockIO InterpreterInsertQuery::execute()
     {
         bool table_prefers_large_blocks = table->prefersLargeBlocks();

-        auto squashing = std::make_shared<SimpleSquashingChunksTransform>(
+        auto squashing = std::make_shared<ApplySquashingTransform>(
             chain.getInputHeader(),
             table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
             table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL);

         chain.addSource(std::move(squashing));
+
+        auto balancing = std::make_shared<PlanSquashingTransform>(
+            chain.getInputHeader(),
+            table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
+            table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL,
+            presink_chains.size());
+
+        chain.addSource(std::move(balancing));
     }

     auto context_ptr = getContext();
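Reading the two InterpreterInsertQuery hunks together: the single squashing step is split in two. A PlanSquashingTransform is inserted first and, judging by the Squashing class added later in this commit, it only groups incoming chunks into a ChunksToSquash plan once the row/byte thresholds are reached; the ApplySquashingTransform that replaces SimpleSquashingChunksTransform then performs the actual column merge. This split is presumably what lets the planning stage balance work across presink_chains.size() sinks without merging the data twice.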
@@ -1481,6 +1481,9 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional<P
     if (expressions.hasHaving() && query.group_by_with_totals && (query.group_by_with_rollup || query.group_by_with_cube))
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "WITH TOTALS and WITH ROLLUP or CUBE are not supported together in presence of HAVING");

+    if (query.qualify())
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "QUALIFY clause is not supported in the old analyzer");
+
     if (options.only_analyze)
     {
         auto read_nothing = std::make_unique<ReadNothingStep>(source_header);
@@ -136,6 +136,9 @@ ColumnsDescription QueryLogElement::getColumnsDescription()

         {"used_row_policies", array_low_cardinality_string, "The list of row policies names that were used during query execution."},

+        {"used_privileges", array_low_cardinality_string, "Privileges which were successfully checked during query execution."},
+        {"missing_privileges", array_low_cardinality_string, "Privileges that are missing during query execution."},
+
         {"transaction_id", getTransactionIDDataType(), "The identifier of the transaction in scope of which this query was executed."},

         {"query_cache_usage", std::move(query_cache_usage_datatype), "Usage of the query cache during query execution. Values: 'Unknown' = Status unknown, 'None' = The query result was neither written into nor read from the query cache, 'Write' = The query result was written into the query cache, 'Read' = The query result was read from the query cache."},
@@ -267,6 +270,8 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const
         auto & column_storage_factory_objects = typeid_cast<ColumnArray &>(*columns[i++]);
         auto & column_table_function_factory_objects = typeid_cast<ColumnArray &>(*columns[i++]);
         auto & column_row_policies_names = typeid_cast<ColumnArray &>(*columns[i++]);
+        auto & column_used_privileges = typeid_cast<ColumnArray &>(*columns[i++]);
+        auto & column_missing_privileges = typeid_cast<ColumnArray &>(*columns[i++]);

         auto fill_column = [](const auto & data, ColumnArray & column)
         {
@@ -290,6 +295,8 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const
         fill_column(used_storages, column_storage_factory_objects);
         fill_column(used_table_functions, column_table_function_factory_objects);
         fill_column(used_row_policies, column_row_policies_names);
+        fill_column(used_privileges, column_used_privileges);
+        fill_column(missing_privileges, column_missing_privileges);
     }

     columns[i++]->insert(Tuple{tid.start_csn, tid.local_tid, tid.host_id});
@@ -81,6 +81,8 @@ struct QueryLogElement
     std::unordered_set<String> used_storages;
     std::unordered_set<String> used_table_functions;
     std::set<String> used_row_policies;
+    std::unordered_set<String> used_privileges;
+    std::unordered_set<String> missing_privileges;

     Int32 exception_code{}; // because ErrorCodes are int
     String exception;
@@ -532,7 +532,7 @@ ContextMutablePtr Session::makeSessionContext()
         session_context->checkSettingsConstraints(settings_from_auth_server, SettingSource::QUERY);
         session_context->applySettingsChanges(settings_from_auth_server);

-    recordLoginSucess(session_context);
+    recordLoginSuccess(session_context);

     return session_context;
 }
@@ -596,7 +596,7 @@ ContextMutablePtr Session::makeSessionContext(const String & session_name_, std:
         { session_name_ },
         max_sessions_for_user);

-    recordLoginSucess(session_context);
+    recordLoginSuccess(session_context);

     return session_context;
 }
@@ -672,13 +672,13 @@ ContextMutablePtr Session::makeQueryContextImpl(const ClientInfo * client_info_t
         user = query_context->getUser();

     /// Interserver does not create session context
-    recordLoginSucess(query_context);
+    recordLoginSuccess(query_context);

     return query_context;
 }


-void Session::recordLoginSucess(ContextPtr login_context) const
+void Session::recordLoginSuccess(ContextPtr login_context) const
 {
     if (notified_session_log_about_login)
         return;
@@ -694,7 +694,7 @@ void Session::recordLoginSuccess(ContextPtr login_context) const
     session_log->addLoginSuccess(auth_id,
                                  named_session ? named_session->key.second : "",
                                  settings,
-                                 access,
+                                 access->getAccess(),
                                  getClientInfo(),
                                  user);
 }
@@ -102,8 +102,7 @@ public:
 private:
     std::shared_ptr<SessionLog> getSessionLog() const;
     ContextMutablePtr makeQueryContextImpl(const ClientInfo * client_info_to_copy, ClientInfo * client_info_to_move) const;
-    void recordLoginSucess(ContextPtr login_context) const;
-
+    void recordLoginSuccess(ContextPtr login_context) const;

     mutable bool notified_session_log_about_login = false;
     const UUID auth_id;
@@ -214,7 +214,7 @@ void SessionLog::addLoginSuccess(const UUID & auth_id,
                                  const ClientInfo & client_info,
                                  const UserPtr & login_user)
 {
-    DB::SessionLogElement log_entry(auth_id, SESSION_LOGIN_SUCCESS);
+    SessionLogElement log_entry(auth_id, SESSION_LOGIN_SUCCESS);
     log_entry.client_info = client_info;

     if (login_user)
src/Interpreters/Squashing.cpp (new file, 159 lines)
@@ -0,0 +1,159 @@
+#include <vector>
+#include <Interpreters/Squashing.h>
+#include <Common/CurrentThread.h>
+
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+Squashing::Squashing(Block header_, size_t min_block_size_rows_, size_t min_block_size_bytes_)
+    : header(header_)
+    , min_block_size_rows(min_block_size_rows_)
+    , min_block_size_bytes(min_block_size_bytes_)
+{
+}
+
+Chunk Squashing::flush()
+{
+    return convertToChunk(std::move(chunks_to_merge_vec));
+}
+
+Chunk Squashing::squash(Chunk && input_chunk)
+{
+    if (!input_chunk.hasChunkInfo())
+        return Chunk();
+
+    const auto *info = getInfoFromChunk(input_chunk);
+    return squash(info->chunks);
+}
+
+Chunk Squashing::add(Chunk && input_chunk)
+{
+    if (!input_chunk)
+        return {};
+
+    /// Just read block is already enough.
+    if (isEnoughSize(input_chunk.getNumRows(), input_chunk.bytes()))
+    {
+        /// If no accumulated data, return just read block.
+        if (chunks_to_merge_vec.empty())
+        {
+            chunks_to_merge_vec.push_back(std::move(input_chunk));
+            Chunk res_chunk = convertToChunk(std::move(chunks_to_merge_vec));
+            chunks_to_merge_vec.clear();
+            return res_chunk;
+        }
+
+        /// Return accumulated data (maybe it has small size) and place new block to accumulated data.
+        Chunk res_chunk = convertToChunk(std::move(chunks_to_merge_vec));
+        chunks_to_merge_vec.clear();
+        changeCurrentSize(input_chunk.getNumRows(), input_chunk.bytes());
+        chunks_to_merge_vec.push_back(std::move(input_chunk));
+        return res_chunk;
+    }
+
+    /// Accumulated block is already enough.
+    if (isEnoughSize(accumulated_size.rows, accumulated_size.bytes))
+    {
+        /// Return accumulated data and place new block to accumulated data.
+        Chunk res_chunk = convertToChunk(std::move(chunks_to_merge_vec));
+        chunks_to_merge_vec.clear();
+        changeCurrentSize(input_chunk.getNumRows(), input_chunk.bytes());
+        chunks_to_merge_vec.push_back(std::move(input_chunk));
+        return res_chunk;
+    }
+
+    /// Pushing data into accumulating vector
+    expandCurrentSize(input_chunk.getNumRows(), input_chunk.bytes());
+    chunks_to_merge_vec.push_back(std::move(input_chunk));
+
+    /// If accumulated data is big enough, we send it
+    if (isEnoughSize(accumulated_size.rows, accumulated_size.bytes))
+    {
+        Chunk res_chunk = convertToChunk(std::move(chunks_to_merge_vec));
+        changeCurrentSize(0, 0);
+        chunks_to_merge_vec.clear();
+        return res_chunk;
+    }
+    return {};
+}
+
+Chunk Squashing::convertToChunk(std::vector<Chunk> && chunks) const
+{
+    if (chunks.empty())
+        return {};
+
+    auto info = std::make_shared<ChunksToSquash>();
+    info->chunks = std::move(chunks);
+
+    chunks.clear();
+
+    return Chunk(header.cloneEmptyColumns(), 0, info);
+}
+
+Chunk Squashing::squash(std::vector<Chunk> & input_chunks)
+{
+    Chunk accumulated_chunk;
+    std::vector<IColumn::MutablePtr> mutable_columns = {};
+    size_t rows = 0;
+    for (const Chunk & chunk : input_chunks)
+        rows += chunk.getNumRows();
+
+    {
+        auto & first_chunk = input_chunks[0];
+        Columns columns = first_chunk.detachColumns();
+        for (auto & column : columns)
+        {
+            mutable_columns.push_back(IColumn::mutate(std::move(column)));
+            mutable_columns.back()->reserve(rows);
+        }
+    }
+
+    for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above
+    {
+        Columns columns = input_chunks[i].detachColumns();
+        for (size_t j = 0, size = mutable_columns.size(); j < size; ++j)
+        {
+            const auto source_column = columns[j];
+
+            mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size());
+        }
+    }
+    accumulated_chunk.setColumns(std::move(mutable_columns), rows);
+    return accumulated_chunk;
+}
+
+const ChunksToSquash* Squashing::getInfoFromChunk(const Chunk & chunk)
+{
+    const auto& info = chunk.getChunkInfo();
+    const auto * agg_info = typeid_cast<const ChunksToSquash *>(info.get());
+
+    if (!agg_info)
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no ChunksToSquash in ChunkInfoPtr");
+
+    return agg_info;
+}
+
+void Squashing::expandCurrentSize(size_t rows, size_t bytes)
+{
+    accumulated_size.rows += rows;
+    accumulated_size.bytes += bytes;
+}
+
+void Squashing::changeCurrentSize(size_t rows, size_t bytes)
+{
+    accumulated_size.rows = rows;
+    accumulated_size.bytes = bytes;
+}
+
+bool Squashing::isEnoughSize(size_t rows, size_t bytes) const
+{
+    return (!min_block_size_rows && !min_block_size_bytes)
+        || (min_block_size_rows && rows >= min_block_size_rows)
+        || (min_block_size_bytes && bytes >= min_block_size_bytes);
+}
+}
src/Interpreters/Squashing.h (new file, 69 lines)
@@ -0,0 +1,69 @@
+#pragma once
+
+#include <vector>
+#include <Core/Block.h>
+#include <Processors/Chunk.h>
+
+
+namespace DB
+{
+
+struct ChunksToSquash : public ChunkInfo
+{
+    mutable std::vector<Chunk> chunks = {};
+};
+
+/** Merging consecutive passed blocks to specified minimum size.
+  *
+  * (But if one of input blocks has already at least specified size,
+  * then don't merge it with neighbours, even if neighbours are small.)
+  *
+  * Used to prepare blocks to adequate size for INSERT queries,
+  * because such storages as Memory, StripeLog, Log, TinyLog...
+  * store or compress data in blocks exactly as passed to it,
+  * and blocks of small size are not efficient.
+  *
+  * Order of data is kept.
+  */
+
+class Squashing
+{
+public:
+    explicit Squashing(Block header_, size_t min_block_size_rows_, size_t min_block_size_bytes_);
+    Squashing(Squashing && other) = default;
+
+    Chunk add(Chunk && input_chunk);
+    static Chunk squash(Chunk && input_chunk);
+    Chunk flush();
+
+    bool isDataLeft()
+    {
+        return !chunks_to_merge_vec.empty();
+    }
+
+    Block header;
+private:
+    struct CurrentSize
+    {
+        size_t rows = 0;
+        size_t bytes = 0;
+    };
+
+    std::vector<Chunk> chunks_to_merge_vec = {};
+    size_t min_block_size_rows;
+    size_t min_block_size_bytes;
+
+    CurrentSize accumulated_size;
+
+    static const ChunksToSquash * getInfoFromChunk(const Chunk & chunk);
+
+    static Chunk squash(std::vector<Chunk> & input_chunks);
+
+    void expandCurrentSize(size_t rows, size_t bytes);
+    void changeCurrentSize(size_t rows, size_t bytes);
+    bool isEnoughSize(size_t rows, size_t bytes) const;
+
+    Chunk convertToChunk(std::vector<Chunk> && chunks) const;
+};
+
+}
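The header comment above describes the squashing policy but not how a caller drives it. Below is a minimal standalone C++ sketch (simplified stand-ins, not the ClickHouse types) of the add()/flush() contract: thresholds are OR-ed, a zero threshold is ignored, and the caller keeps feeding chunks until add() hands back a batch. The real add() additionally emits an already-large input chunk on its own, so big blocks are never merged with small neighbours.

#include <cstddef>
#include <utility>
#include <vector>

struct FakeChunk { size_t rows = 0; size_t bytes = 0; };  // hypothetical stand-in for DB::Chunk

class SquashingSketch
{
public:
    SquashingSketch(size_t min_rows_, size_t min_bytes_)
        : min_rows(min_rows_), min_bytes(min_bytes_) {}

    /// Accumulate; returns a non-empty batch once enough rows/bytes piled up.
    std::vector<FakeChunk> add(FakeChunk chunk)
    {
        acc_rows += chunk.rows;
        acc_bytes += chunk.bytes;
        pending.push_back(std::move(chunk));
        if (isEnoughSize(acc_rows, acc_bytes))
        {
            acc_rows = acc_bytes = 0;
            return std::exchange(pending, {});
        }
        return {};  // not enough data yet
    }

    /// End of stream: hand back whatever is left, however small.
    std::vector<FakeChunk> flush() { return std::exchange(pending, {}); }

private:
    /// Same OR-ed rule as Squashing::isEnoughSize(): a zero threshold is ignored.
    bool isEnoughSize(size_t rows, size_t bytes) const
    {
        return (!min_rows && !min_bytes)
            || (min_rows && rows >= min_rows)
            || (min_bytes && bytes >= min_bytes);
    }

    size_t min_rows;
    size_t min_bytes;
    size_t acc_rows = 0;
    size_t acc_bytes = 0;
    std::vector<FakeChunk> pending;
};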
src/Interpreters/SquashingTransform.cpp (145 lines, deleted)
@@ -1,145 +0,0 @@
-#include <Interpreters/SquashingTransform.h>
-
-
-namespace DB
-{
-namespace ErrorCodes
-{
-    extern const int SIZES_OF_COLUMNS_DOESNT_MATCH;
-    extern const int LOGICAL_ERROR;
-}
-
-SquashingTransform::SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_)
-    : min_block_size_rows(min_block_size_rows_)
-    , min_block_size_bytes(min_block_size_bytes_)
-{
-}
-
-Block SquashingTransform::add(Block && input_block)
-{
-    return addImpl<Block &&>(std::move(input_block));
-}
-
-Block SquashingTransform::add(const Block & input_block)
-{
-    return addImpl<const Block &>(input_block);
-}
-
-/*
- * To minimize copying, accept two types of argument: const reference for output
- * stream, and rvalue reference for input stream, and decide whether to copy
- * inside this function. This allows us not to copy Block unless we absolutely
- * have to.
- */
-template <typename ReferenceType>
-Block SquashingTransform::addImpl(ReferenceType input_block)
-{
-    /// End of input stream.
-    if (!input_block)
-    {
-        Block to_return;
-        std::swap(to_return, accumulated_block);
-        return to_return;
-    }
-
-    /// Just read block is already enough.
-    if (isEnoughSize(input_block))
-    {
-        /// If no accumulated data, return just read block.
-        if (!accumulated_block)
-        {
-            return std::move(input_block);
-        }
-
-        /// Return accumulated data (maybe it has small size) and place new block to accumulated data.
-        Block to_return = std::move(input_block);
-        std::swap(to_return, accumulated_block);
-        return to_return;
-    }
-
-    /// Accumulated block is already enough.
-    if (isEnoughSize(accumulated_block))
-    {
-        /// Return accumulated data and place new block to accumulated data.
-        Block to_return = std::move(input_block);
-        std::swap(to_return, accumulated_block);
-        return to_return;
-    }
-
-    append<ReferenceType>(std::move(input_block));
-    if (isEnoughSize(accumulated_block))
-    {
-        Block to_return;
-        std::swap(to_return, accumulated_block);
-        return to_return;
-    }
-
-    /// Squashed block is not ready.
-    return {};
-}
-
-
-template <typename ReferenceType>
-void SquashingTransform::append(ReferenceType input_block)
-{
-    if (!accumulated_block)
-    {
-        accumulated_block = std::move(input_block);
-        return;
-    }
-
-    assert(blocksHaveEqualStructure(input_block, accumulated_block));
-
-    try
-    {
-        for (size_t i = 0, size = accumulated_block.columns(); i < size; ++i)
-        {
-            const auto source_column = input_block.getByPosition(i).column;
-
-            auto mutable_column = IColumn::mutate(std::move(accumulated_block.getByPosition(i).column));
-            mutable_column->insertRangeFrom(*source_column, 0, source_column->size());
-            accumulated_block.getByPosition(i).column = std::move(mutable_column);
-        }
-    }
-    catch (...)
-    {
-        /// add() may be called again even after a previous add() threw an exception.
-        /// Keep accumulated_block in a valid state.
-        /// Seems ok to discard accumulated data because we're throwing an exception, which the caller will
-        /// hopefully interpret to mean "this block and all *previous* blocks are potentially lost".
-        accumulated_block.clear();
-        throw;
-    }
-}
-
-
-bool SquashingTransform::isEnoughSize(const Block & block)
-{
-    size_t rows = 0;
-    size_t bytes = 0;
-
-    for (const auto & [column, type, name] : block)
-    {
-        if (!column)
-            throw Exception(ErrorCodes::LOGICAL_ERROR, "Invalid column in block.");
-
-        if (!rows)
-            rows = column->size();
-        else if (rows != column->size())
-            throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "Sizes of columns doesn't match");
-
-        bytes += column->byteSize();
-    }
-
-    return isEnoughSize(rows, bytes);
-}
-
-
-bool SquashingTransform::isEnoughSize(size_t rows, size_t bytes) const
-{
-    return (!min_block_size_rows && !min_block_size_bytes)
-        || (min_block_size_rows && rows >= min_block_size_rows)
-        || (min_block_size_bytes && bytes >= min_block_size_bytes);
-}
-
-}
src/Interpreters/SquashingTransform.h (50 lines, deleted)
@@ -1,50 +0,0 @@
-#pragma once
-
-#include <Core/Block.h>
-
-
-namespace DB
-{
-
-/** Merging consecutive passed blocks to specified minimum size.
-  *
-  * (But if one of input blocks has already at least specified size,
-  * then don't merge it with neighbours, even if neighbours are small.)
-  *
-  * Used to prepare blocks to adequate size for INSERT queries,
-  * because such storages as Memory, StripeLog, Log, TinyLog...
-  * store or compress data in blocks exactly as passed to it,
-  * and blocks of small size are not efficient.
-  *
-  * Order of data is kept.
-  */
-class SquashingTransform
-{
-public:
-    /// Conditions on rows and bytes are OR-ed. If one of them is zero, then corresponding condition is ignored.
-    SquashingTransform(size_t min_block_size_rows_, size_t min_block_size_bytes_);
-
-    /** Add next block and possibly returns squashed block.
-      * At end, you need to pass empty block. As the result for last (empty) block, you will get last Result with ready = true.
-      */
-    Block add(Block && block);
-    Block add(const Block & block);
-
-private:
-    size_t min_block_size_rows;
-    size_t min_block_size_bytes;
-
-    Block accumulated_block;
-
-    template <typename ReferenceType>
-    Block addImpl(ReferenceType block);
-
-    template <typename ReferenceType>
-    void append(ReferenceType block);
-
-    bool isEnoughSize(const Block & block);
-    bool isEnoughSize(size_t rows, size_t bytes) const;
-};
-
-}
@@ -615,9 +615,9 @@ static bool decimalEqualsFloat(Field field, Float64 float_value)
     return decimal_to_float == float_value;
 }

-std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & to_type)
+std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type)
 {
-    Field result_value = convertFieldToType(from_value, to_type);
+    Field result_value = convertFieldToType(from_value, to_type, &from_type);

     if (Field::isDecimal(from_value.getType()) && Field::isDecimal(result_value.getType()))
     {
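The extra from_type parameter is simply forwarded as the optional third argument of convertFieldToType, so callers of convertFieldToTypeStrict now have to name the source type explicitly instead of letting it be inferred from the Field alone; presumably this covers cases where the Field's runtime type by itself was ambiguous.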
@@ -22,6 +22,6 @@ Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_t

 /// Applies stricter rules than convertFieldToType, doesn't allow loss of precision converting to Decimal.
 /// Returns `Field` if the conversion was successful and the result is equal to the original value, otherwise returns nullopt.
-std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & to_type);
+std::optional<Field> convertFieldToTypeStrict(const Field & from_value, const IDataType & from_type, const IDataType & to_type);

 }
@@ -44,6 +44,7 @@
 #include <Formats/FormatFactory.h>
 #include <Storages/StorageInput.h>

+#include <Access/ContextAccess.h>
 #include <Access/EnabledQuota.h>
 #include <Interpreters/ApplyWithGlobalVisitor.h>
 #include <Interpreters/Context.h>
@@ -221,6 +222,17 @@ static void logException(ContextPtr context, QueryLogElement & elem, bool log_er
     LOG_INFO(getLogger("executeQuery"), message);
 }

+static void
+addPrivilegesInfoToQueryLogElement(QueryLogElement & element, const ContextPtr context_ptr)
+{
+    const auto & privileges_info = context_ptr->getQueryPrivilegesInfo();
+    {
+        std::lock_guard lock(privileges_info.mutex);
+        element.used_privileges = privileges_info.used_privileges;
+        element.missing_privileges = privileges_info.missing_privileges;
+    }
+}
+
 static void
 addStatusInfoToQueryLogElement(QueryLogElement & element, const QueryStatusInfo & info, const ASTPtr query_ast, const ContextPtr context_ptr)
 {
@@ -286,6 +298,7 @@ addStatusInfoToQueryLogElement(QueryLogElement & element, const QueryStatusInfo
     }

     element.async_read_counters = context_ptr->getAsyncReadCounters();
+    addPrivilegesInfoToQueryLogElement(element, context_ptr);
 }


@@ -601,6 +614,8 @@ void logExceptionBeforeStart(
         elem.formatted_query = queryToString(ast);
     }

+    addPrivilegesInfoToQueryLogElement(elem, context);
+
     // We don't calculate databases, tables and columns when the query isn't able to start

     elem.exception_code = getCurrentExceptionCode();
@@ -2179,7 +2179,7 @@ public:

     bool parse(IParser::Pos & pos, Expected & expected, Action & /*action*/) override
     {
-        /// kql(table|project ...)
+        /// kql('table|project ...')
         /// 0. Parse the kql query
         /// 1. Parse closing token
         if (state == 0)
@@ -853,7 +853,7 @@ Please note that the functions listed below only take constant parameters for no
 ## KQL() function

 - create table
-`CREATE TABLE kql_table4 ENGINE = Memory AS select *, now() as new_column From kql(Customers | project LastName,Age);`
+`CREATE TABLE kql_table4 ENGINE = Memory AS select *, now() as new_column From kql($$Customers | project LastName,Age$$);`
 verify the content of `kql_table`
 `select * from kql_table`

@@ -867,12 +867,12 @@ Please note that the functions listed below only take constant parameters for no
     Age Nullable(UInt8)
 ) ENGINE = Memory;
 ```
-`INSERT INTO temp select * from kql(Customers|project FirstName,LastName,Age);`
+`INSERT INTO temp select * from kql($$Customers|project FirstName,LastName,Age$$);`
 verify the content of `temp`
 `select * from temp`

-- Select from kql()
-`Select * from kql(Customers|project FirstName)`
+- Select from kql(...)
+`Select * from kql($$Customers|project FirstName$$)`

 ## KQL operators:
 - Tabular expression statements
@@ -993,4 +993,3 @@ Please note that the functions listed below only take constant parameters for no
 - dcount()
 - dcountif()
 - bin
-
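All of the documentation examples above switch the kql() argument to $$...$$, ClickHouse's heredoc form of a string literal, so the embedded KQL text is now passed as a single string constant rather than as bare tokens; this matches the parser comment change earlier in this diff (/// kql('table|project ...')).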
@@ -301,8 +301,8 @@ String IParserKQLFunction::kqlCallToExpression(
         });

     const auto kql_call = std::format("{}({})", function_name, params_str);
-    DB::Tokens call_tokens(kql_call.c_str(), kql_call.c_str() + kql_call.length());
-    DB::IParser::Pos tokens_pos(call_tokens, max_depth, max_backtracks);
+    Tokens call_tokens(kql_call.data(), kql_call.data() + kql_call.length(), 0, true);
+    IParser::Pos tokens_pos(call_tokens, max_depth, max_backtracks);
     return DB::IParserKQLFunction::getExpression(tokens_pos);
 }
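The same mechanical change repeats through the KQL parser files that follow: every two-argument Tokens(begin, end) construction becomes Tokens(begin, end, 0, true). Judging from the call sites, the two new arguments look like a max_query_size of 0 (unlimited) and a boolean controlling how the lexer treats insignificant tokens; that reading is an inference from this diff, not a checked signature.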
@@ -11,7 +11,7 @@ bool ParserKQLDistinct::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

     expr = getExprFromToken(pos);

-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);

     if (!ParserNotEmptyExpressionList(false).parse(new_pos, select_expression_list, expected))
@@ -22,7 +22,7 @@ bool ParserKQLExtend ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

     String except_str;
     String new_extend_str;
-    Tokens ntokens(extend_expr.c_str(), extend_expr.c_str() + extend_expr.size());
+    Tokens ntokens(extend_expr.data(), extend_expr.data() + extend_expr.size(), 0, true);
     IParser::Pos npos(ntokens, pos.max_depth, pos.max_backtracks);

     String alias;
@@ -76,7 +76,7 @@ bool ParserKQLExtend ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     apply_alias();

     String expr = std::format("SELECT * {}, {} from prev", except_str, new_extend_str);
-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);

     if (!ParserSelectQuery().parse(new_pos, select_query, expected))
@@ -13,7 +13,7 @@ bool ParserKQLFilter::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     String expr = getExprFromToken(pos);
     ASTPtr where_expression;

-    Tokens token_filter(expr.c_str(), expr.c_str() + expr.size());
+    Tokens token_filter(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos pos_filter(token_filter, pos.max_depth, pos.max_backtracks);
     if (!ParserExpressionWithOptionalAlias(false).parse(pos_filter, where_expression, expected))
         return false;
@@ -13,7 +13,7 @@ bool ParserKQLLimit::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

     auto expr = getExprFromToken(pos);

-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);

     if (!ParserExpressionWithOptionalAlias(false).parse(new_pos, limit_length, expected))
@@ -298,7 +298,7 @@ bool ParserKQLMVExpand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         return false;

     const String setting_str = "enable_unaligned_array_join = 1";
-    Tokens token_settings(setting_str.c_str(), setting_str.c_str() + setting_str.size());
+    Tokens token_settings(setting_str.data(), setting_str.data() + setting_str.size(), 0, true);
     IParser::Pos pos_settings(token_settings, pos.max_depth, pos.max_backtracks);

     if (!ParserSetQuery(true).parse(pos_settings, setting, expected))
@@ -173,7 +173,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr &

     auto date_type_cast = [&](String & src)
     {
-        Tokens tokens(src.c_str(), src.c_str() + src.size());
+        Tokens tokens(src.data(), src.data() + src.size(), 0, true);
         IParser::Pos pos(tokens, max_depth, max_backtracks);
         String res;
         while (isValidKQLPos(pos))
@@ -200,7 +200,7 @@ bool ParserKQLMakeSeries ::makeSeries(KQLMakeSeries & kql_make_series, ASTPtr &
     auto get_group_expression_alias = [&]
     {
         std::vector<String> group_expression_tokens;
-        Tokens tokens(group_expression.c_str(), group_expression.c_str() + group_expression.size());
+        Tokens tokens(group_expression.data(), group_expression.data() + group_expression.size(), 0, true);
         IParser::Pos pos(tokens, max_depth, max_backtracks);
         while (isValidKQLPos(pos))
         {
@@ -413,7 +413,7 @@ bool ParserKQLMakeSeries ::parseImpl(Pos & pos, ASTPtr & node, Expected & expect

     makeSeries(kql_make_series, node, pos.max_depth, pos.max_backtracks);

-    Tokens token_main_query(kql_make_series.main_query.c_str(), kql_make_series.main_query.c_str() + kql_make_series.main_query.size());
+    Tokens token_main_query(kql_make_series.main_query.data(), kql_make_series.main_query.data() + kql_make_series.main_query.size(), 0, true);
     IParser::Pos pos_main_query(token_main_query, pos.max_depth, pos.max_backtracks);

     if (!ParserNotEmptyExpressionList(true).parse(pos_main_query, select_expression_list, expected))
@@ -1,20 +1,26 @@
 #include <Parsers/ASTLiteral.h>
 #include <Parsers/CommonParsers.h>
 #include <Parsers/Kusto/KustoFunctions/IParserKQLFunction.h>
-#include <Parsers/Kusto/KustoFunctions/KQLFunctionFactory.h>
 #include <Parsers/Kusto/ParserKQLOperators.h>
-#include <Parsers/Kusto/ParserKQLQuery.h>
 #include <Parsers/Kusto/ParserKQLStatement.h>
 #include <Parsers/Kusto/Utilities.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTIdentifier.h>
 #include <Parsers/formatAST.h>
-#include "KustoFunctions/IParserKQLFunction.h"
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int SYNTAX_ERROR;
+}

 namespace
 {

-enum class KQLOperatorValue : uint16_t
+enum class KQLOperatorValue
 {
     none,
     between,
@@ -56,7 +62,8 @@ enum class KQLOperatorValue : uint16_t
     not_startswith_cs,
 };

-const std::unordered_map<String, KQLOperatorValue> KQLOperator = {
+const std::unordered_map<String, KQLOperatorValue> KQLOperator =
+{
     {"between", KQLOperatorValue::between},
     {"!between", KQLOperatorValue::not_between},
     {"contains", KQLOperatorValue::contains},
@@ -96,44 +103,37 @@ const std::unordered_map<String, KQLOperatorValue> KQLOperator = {
     {"!startswith_cs", KQLOperatorValue::not_startswith_cs},
 };

-void rebuildSubqueryForInOperator(DB::ASTPtr & node, bool useLowerCase)
+void rebuildSubqueryForInOperator(ASTPtr & node, bool useLowerCase)
 {
     //A sub-query for in operator in kql can have multiple columns, but only takes the first column.
     //A sub-query for in operator in ClickHouse can not have multiple columns
     //So only take the first column if there are multiple columns.
     //select * not working for subquery. (a tabular statement without project)

-    const auto selectColumns = node->children[0]->children[0]->as<DB::ASTSelectQuery>()->select();
+    const auto selectColumns = node->children[0]->children[0]->as<ASTSelectQuery>()->select();
     while (selectColumns->children.size() > 1)
         selectColumns->children.pop_back();

     if (useLowerCase)
     {
-        auto args = std::make_shared<DB::ASTExpressionList>();
+        auto args = std::make_shared<ASTExpressionList>();
         args->children.push_back(selectColumns->children[0]);
-        auto func_lower = std::make_shared<DB::ASTFunction>();
+        auto func_lower = std::make_shared<ASTFunction>();
         func_lower->name = "lower";
         func_lower->children.push_back(selectColumns->children[0]);
         func_lower->arguments = args;
-        if (selectColumns->children[0]->as<DB::ASTIdentifier>())
-            func_lower->alias = std::move(selectColumns->children[0]->as<DB::ASTIdentifier>()->alias);
-        else if (selectColumns->children[0]->as<DB::ASTFunction>())
-            func_lower->alias = std::move(selectColumns->children[0]->as<DB::ASTFunction>()->alias);
+        if (selectColumns->children[0]->as<ASTIdentifier>())
+            func_lower->alias = std::move(selectColumns->children[0]->as<ASTIdentifier>()->alias);
+        else if (selectColumns->children[0]->as<ASTFunction>())
+            func_lower->alias = std::move(selectColumns->children[0]->as<ASTFunction>()->alias);

-        auto funcs = std::make_shared<DB::ASTExpressionList>();
+        auto funcs = std::make_shared<ASTExpressionList>();
         funcs->children.push_back(func_lower);
         selectColumns->children[0] = std::move(funcs);
     }
 }

 }
-namespace DB
-{
-
-namespace ErrorCodes
-{
-    extern const int SYNTAX_ERROR;
-}

 String KQLOperators::genHasAnyAllOpExpr(std::vector<String> & tokens, IParser::Pos & token_pos, String kql_op, String ch_op)
 {
@@ -166,7 +166,7 @@ String KQLOperators::genHasAnyAllOpExpr(std::vector<String> & tokens, IParser::P
     return new_expr;
 }

-String genEqOpExprCis(std::vector<String> & tokens, DB::IParser::Pos & token_pos, const String & ch_op)
+String genEqOpExprCis(std::vector<String> & tokens, IParser::Pos & token_pos, const String & ch_op)
 {
     String tmp_arg(token_pos->begin, token_pos->end);

@@ -178,30 +178,30 @@ String genEqOpExprCis(std::vector<String> & tokens, DB::IParser::Pos & token_pos
     new_expr += ch_op + " ";
     ++token_pos;

-    if (token_pos->type == DB::TokenType::StringLiteral || token_pos->type == DB::TokenType::QuotedIdentifier)
-        new_expr += "lower('" + DB::IParserKQLFunction::escapeSingleQuotes(String(token_pos->begin + 1, token_pos->end - 1)) + "')";
+    if (token_pos->type == TokenType::StringLiteral || token_pos->type == TokenType::QuotedIdentifier)
+        new_expr += "lower('" + IParserKQLFunction::escapeSingleQuotes(String(token_pos->begin + 1, token_pos->end - 1)) + "')";
     else
-        new_expr += "lower(" + DB::IParserKQLFunction::getExpression(token_pos) + ")";
+        new_expr += "lower(" + IParserKQLFunction::getExpression(token_pos) + ")";

     tokens.pop_back();
     return new_expr;
 }

-String genInOpExprCis(std::vector<String> & tokens, DB::IParser::Pos & token_pos, const String & kql_op, const String & ch_op)
+String genInOpExprCis(std::vector<String> & tokens, IParser::Pos & token_pos, const String & kql_op, const String & ch_op)
 {
-    DB::ParserKQLTableFunction kqlfun_p;
-    DB::ParserToken s_lparen(DB::TokenType::OpeningRoundBracket);
+    ParserKQLTableFunction kqlfun_p;
+    ParserToken s_lparen(TokenType::OpeningRoundBracket);

-    DB::ASTPtr select;
-    DB::Expected expected;
+    ASTPtr select;
+    Expected expected;
     String new_expr;

     ++token_pos;
     if (!s_lparen.ignore(token_pos, expected))
-        throw DB::Exception(DB::ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);
+        throw Exception(ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);

     if (tokens.empty())
-        throw DB::Exception(DB::ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);
+        throw Exception(ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);

     new_expr = "lower(" + tokens.back() + ") ";
     tokens.pop_back();
@@ -218,39 +218,39 @@ String genInOpExprCis(std::vector<String> & tokens, DB::IParser::Pos & token_pos
     --token_pos;

     new_expr += ch_op;
-    while (isValidKQLPos(token_pos) && token_pos->type != DB::TokenType::PipeMark && token_pos->type != DB::TokenType::Semicolon)
+    while (isValidKQLPos(token_pos) && token_pos->type != TokenType::PipeMark && token_pos->type != TokenType::Semicolon)
     {
         auto tmp_arg = String(token_pos->begin, token_pos->end);
-        if (token_pos->type != DB::TokenType::Comma && token_pos->type != DB::TokenType::ClosingRoundBracket
-            && token_pos->type != DB::TokenType::OpeningRoundBracket && token_pos->type != DB::TokenType::OpeningSquareBracket
-            && token_pos->type != DB::TokenType::ClosingSquareBracket && tmp_arg != "~" && tmp_arg != "dynamic")
+        if (token_pos->type != TokenType::Comma && token_pos->type != TokenType::ClosingRoundBracket
+            && token_pos->type != TokenType::OpeningRoundBracket && token_pos->type != TokenType::OpeningSquareBracket
+            && token_pos->type != TokenType::ClosingSquareBracket && tmp_arg != "~" && tmp_arg != "dynamic")
         {
-            if (token_pos->type == DB::TokenType::StringLiteral || token_pos->type == DB::TokenType::QuotedIdentifier)
-                new_expr += "lower('" + DB::IParserKQLFunction::escapeSingleQuotes(String(token_pos->begin + 1, token_pos->end - 1)) + "')";
+            if (token_pos->type == TokenType::StringLiteral || token_pos->type == TokenType::QuotedIdentifier)
+                new_expr += "lower('" + IParserKQLFunction::escapeSingleQuotes(String(token_pos->begin + 1, token_pos->end - 1)) + "')";
             else
                 new_expr += "lower(" + tmp_arg + ")";
         }
         else if (tmp_arg != "~" && tmp_arg != "dynamic" && tmp_arg != "[" && tmp_arg != "]")
             new_expr += tmp_arg;

-        if (token_pos->type == DB::TokenType::ClosingRoundBracket)
+        if (token_pos->type == TokenType::ClosingRoundBracket)
             break;
         ++token_pos;
     }
     return new_expr;
 }

-std::string genInOpExpr(DB::IParser::Pos & token_pos, const std::string & kql_op, const std::string & ch_op)
+std::string genInOpExpr(IParser::Pos & token_pos, const std::string & kql_op, const std::string & ch_op)
 {
-    DB::ParserKQLTableFunction kqlfun_p;
-    DB::ParserToken s_lparen(DB::TokenType::OpeningRoundBracket);
+    ParserKQLTableFunction kqlfun_p;
+    ParserToken s_lparen(TokenType::OpeningRoundBracket);

-    DB::ASTPtr select;
-    DB::Expected expected;
+    ASTPtr select;
+    Expected expected;

     ++token_pos;
     if (!s_lparen.ignore(token_pos, expected))
-        throw DB::Exception(DB::ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);
+        throw Exception(ErrorCodes::SYNTAX_ERROR, "Syntax error near {}", kql_op);

     auto pos = token_pos;
     if (kqlfun_p.parse(pos, select, expected))
@@ -9,7 +9,7 @@ bool ParserKQLPrint::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     ASTPtr select_expression_list;
     const String expr = getExprFromToken(pos);

-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);

     if (!ParserNotEmptyExpressionList(true).parse(new_pos, select_expression_list, expected))
@@ -11,7 +11,7 @@ bool ParserKQLProject ::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)

     expr = getExprFromToken(pos);

-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);

     if (!ParserNotEmptyExpressionList(false).parse(new_pos, select_expression_list, expected))
@ -37,7 +37,7 @@ bool ParserKQLBase::parseByString(String expr, ASTPtr & node, uint32_t max_depth
|
|||||||
{
|
{
|
||||||
Expected expected;
|
Expected expected;
|
||||||
|
|
||||||
Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
|
Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
|
||||||
IParser::Pos pos(tokens, max_depth, max_backtracks);
|
IParser::Pos pos(tokens, max_depth, max_backtracks);
|
||||||
return parse(pos, node, expected);
|
return parse(pos, node, expected);
|
||||||
}
|
}
|
||||||
@ -45,7 +45,7 @@ bool ParserKQLBase::parseByString(String expr, ASTPtr & node, uint32_t max_depth
|
|||||||
bool ParserKQLBase::parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks)
|
bool ParserKQLBase::parseSQLQueryByString(ParserPtr && parser, String & query, ASTPtr & select_node, uint32_t max_depth, uint32_t max_backtracks)
|
||||||
{
|
{
|
||||||
Expected expected;
|
Expected expected;
|
||||||
Tokens token_subquery(query.c_str(), query.c_str() + query.size());
|
Tokens token_subquery(query.data(), query.data() + query.size(), 0, true);
|
||||||
IParser::Pos pos_subquery(token_subquery, max_depth, max_backtracks);
|
IParser::Pos pos_subquery(token_subquery, max_depth, max_backtracks);
|
||||||
if (!parser->parse(pos_subquery, select_node, expected))
|
if (!parser->parse(pos_subquery, select_node, expected))
|
||||||
return false;
|
return false;
|
||||||
@ -123,7 +123,7 @@ bool ParserKQLBase::setSubQuerySource(ASTPtr & select_query, ASTPtr & source, bo
|
|||||||
|
|
||||||
String ParserKQLBase::getExprFromToken(const String & text, uint32_t max_depth, uint32_t max_backtracks)
|
String ParserKQLBase::getExprFromToken(const String & text, uint32_t max_depth, uint32_t max_backtracks)
|
||||||
{
|
{
|
||||||
Tokens tokens(text.c_str(), text.c_str() + text.size());
|
Tokens tokens(text.data(), text.data() + text.size(), 0, true);
|
||||||
IParser::Pos pos(tokens, max_depth, max_backtracks);
|
IParser::Pos pos(tokens, max_depth, max_backtracks);
|
||||||
|
|
||||||
return getExprFromToken(pos);
|
return getExprFromToken(pos);
|
||||||
@ -522,7 +522,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
|
|||||||
--last_pos;
|
--last_pos;
|
||||||
|
|
||||||
String sub_query = std::format("({})", String(operation_pos.front().second->begin, last_pos->end));
|
String sub_query = std::format("({})", String(operation_pos.front().second->begin, last_pos->end));
|
||||||
Tokens token_subquery(sub_query.c_str(), sub_query.c_str() + sub_query.size());
|
Tokens token_subquery(sub_query.data(), sub_query.data() + sub_query.size(), 0, true);
|
||||||
IParser::Pos pos_subquery(token_subquery, pos.max_depth, pos.max_backtracks);
|
IParser::Pos pos_subquery(token_subquery, pos.max_depth, pos.max_backtracks);
|
||||||
|
|
||||||
if (!ParserKQLSubquery().parse(pos_subquery, tables, expected))
|
if (!ParserKQLSubquery().parse(pos_subquery, tables, expected))
|
||||||
@ -543,7 +543,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     auto oprator = getOperator(op_str);
     if (oprator)
     {
-        Tokens token_clause(op_calsue.c_str(), op_calsue.c_str() + op_calsue.size());
+        Tokens token_clause(op_calsue.data(), op_calsue.data() + op_calsue.size(), 0, true);
         IParser::Pos pos_clause(token_clause, pos.max_depth, pos.max_backtracks);
         if (!oprator->parse(pos_clause, node, expected))
             return false;
@ -576,7 +576,7 @@ bool ParserKQLQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
     if (!node->as<ASTSelectQuery>()->select())
     {
         auto expr = String("*");
-        Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+        Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
         IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);
         if (!std::make_unique<ParserKQLProject>()->parse(new_pos, node, expected))
             return false;
@ -18,7 +18,7 @@ bool ParserKQLSort::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 
     auto expr = getExprFromToken(pos);
 
-    Tokens tokens(expr.c_str(), expr.c_str() + expr.size());
+    Tokens tokens(expr.data(), expr.data() + expr.size(), 0, true);
     IParser::Pos new_pos(tokens, pos.max_depth, pos.max_backtracks);
 
     auto pos_backup = new_pos;
@ -2,13 +2,13 @@
 #include <Parsers/ASTSelectWithUnionQuery.h>
 #include <Parsers/CommonParsers.h>
 #include <Parsers/IParserBase.h>
-#include <Parsers/Kusto/KustoFunctions/KQLFunctionFactory.h>
 #include <Parsers/Kusto/ParserKQLQuery.h>
 #include <Parsers/Kusto/ParserKQLStatement.h>
 #include <Parsers/Kusto/Utilities.h>
 #include <Parsers/ParserSetQuery.h>
+#include <Parsers/ASTLiteral.h>
 
 
 namespace DB
 {
 
@ -63,6 +63,8 @@ bool ParserKQLWithUnionQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & exp
 
 bool ParserKQLTableFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
+    /// TODO: This code is idiotic, see https://github.com/ClickHouse/ClickHouse/issues/61742
+
     ParserToken lparen(TokenType::OpeningRoundBracket);
 
     ASTPtr string_literal;
@ -101,13 +103,16 @@ bool ParserKQLTableFunction::parseImpl(Pos & pos, ASTPtr & node, Expected & expe
         ++pos;
     }
 
-    Tokens token_kql(kql_statement.data(), kql_statement.data() + kql_statement.size());
-    IParser::Pos pos_kql(token_kql, pos.max_depth, pos.max_backtracks);
+    Tokens tokens_kql(kql_statement.data(), kql_statement.data() + kql_statement.size(), 0, true);
+    IParser::Pos pos_kql(tokens_kql, pos.max_depth, pos.max_backtracks);
 
     Expected kql_expected;
     kql_expected.enable_highlighting = false;
     if (!ParserKQLWithUnionQuery().parse(pos_kql, node, kql_expected))
         return false;
 
     ++pos;
     return true;
 }
 
 }
@ -45,7 +45,7 @@ protected:
 class ParserKQLTableFunction : public IParserBase
 {
 protected:
-    const char * getName() const override { return "KQL() function"; }
+    const char * getName() const override { return "KQL function"; }
     bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
 };
 
@ -194,7 +194,7 @@ bool ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
 
     String converted_columns = getExprFromToken(expr_columns, pos.max_depth, pos.max_backtracks);
 
-    Tokens token_converted_columns(converted_columns.c_str(), converted_columns.c_str() + converted_columns.size());
+    Tokens token_converted_columns(converted_columns.data(), converted_columns.data() + converted_columns.size(), 0, true);
     IParser::Pos pos_converted_columns(token_converted_columns, pos.max_depth, pos.max_backtracks);
 
     if (!ParserNotEmptyExpressionList(true).parse(pos_converted_columns, select_expression_list, expected))
@ -206,7 +206,7 @@ bool ParserKQLSummarize::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
 {
     String converted_groupby = getExprFromToken(expr_groupby, pos.max_depth, pos.max_backtracks);
 
-    Tokens token_converted_groupby(converted_groupby.c_str(), converted_groupby.c_str() + converted_groupby.size());
+    Tokens token_converted_groupby(converted_groupby.data(), converted_groupby.data() + converted_groupby.size(), 0, true);
     IParser::Pos postoken_converted_groupby(token_converted_groupby, pos.max_depth, pos.max_backtracks);
 
     if (!ParserNotEmptyExpressionList(false).parse(postoken_converted_groupby, group_expression_list, expected))
@ -21,6 +21,7 @@ class Tokens
 {
 private:
     std::vector<Token> data;
+    size_t max_pos = 0;
     Lexer lexer;
     bool skip_insignificant;
 
@ -35,10 +36,16 @@ public:
         while (true)
         {
             if (index < data.size())
+            {
+                max_pos = std::max(max_pos, index);
                 return data[index];
+            }
 
             if (!data.empty() && data.back().isEnd())
+            {
+                max_pos = data.size() - 1;
                 return data.back();
+            }
 
             Token token = lexer.nextToken();
 
@ -51,7 +58,12 @@ public:
     {
         if (data.empty())
             return (*this)[0];
-        return data.back();
+        return data[max_pos];
+    }
+
+    void reset()
+    {
+        max_pos = 0;
     }
 };
 
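For context, a self-contained sketch of the bookkeeping these Tokens hunks introduce (a simplified stand-in, not the ClickHouse class): operator[] records the furthest index ever requested, max() reports it so error messages point at the right token, and reset() discards positions touched by a throw-away lookahead pass:

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    /// Simplified stand-in for Tokens: tracks the furthest position the parser touched.
    struct TokenBuffer
    {
        std::vector<std::string> data;
        size_t max_pos = 0;

        const std::string & operator[](size_t index)
        {
            max_pos = std::max(max_pos, index);  // remember the high-water mark
            return data[index];                  // assumes index < data.size() for brevity
        }

        /// Furthest token seen so far; used to locate errors after a failed parse.
        const std::string & max() { return data[max_pos]; }

        /// Forget positions touched by a speculative lookahead pass.
        void reset() { max_pos = 0; }
    };

The reset() hook exists precisely for the lookahead scan added to tryParseQuery below, which reads ahead of the real parser and must not skew the reported maximum position.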
@ -4,6 +4,7 @@
 #include <Parsers/ParserQuery.h>
 #include <Parsers/ASTInsertQuery.h>
 #include <Parsers/ASTExplainQuery.h>
+#include <Parsers/CommonParsers.h>
 #include <Parsers/Lexer.h>
 #include <Parsers/TokenIterator.h>
 #include <Common/StringUtils.h>
@ -285,6 +286,33 @@ ASTPtr tryParseQuery(
     }
 
     Expected expected;
+
+    /** A shortcut - if Lexer found invalid tokens, fail early without full parsing.
+      * But there are certain cases when invalid tokens are permitted:
+      * 1. INSERT queries can have arbitrary data after the FORMAT clause, that is parsed by a different parser.
+      * 2. It can also be the case when there are multiple queries separated by semicolons, and the first queries are ok
+      * while subsequent queries have syntax errors.
+      *
+      * This shortcut is needed to avoid complex backtracking in case of obviously erroneous queries.
+      */
+    IParser::Pos lookahead(token_iterator);
+    if (!ParserKeyword(Keyword::INSERT_INTO).ignore(lookahead))
+    {
+        while (lookahead->type != TokenType::Semicolon && lookahead->type != TokenType::EndOfStream)
+        {
+            if (lookahead->isError())
+            {
+                out_error_message = getLexicalErrorMessage(query_begin, all_queries_end, *lookahead, hilite, query_description);
+                return nullptr;
+            }
+
+            ++lookahead;
+        }
+
+        /// We should not spoil the info about maximum parsed position in the original iterator.
+        tokens.reset();
+    }
+
     ASTPtr res;
     const bool parse_res = parser.parse(token_iterator, res, expected);
     const auto last_token = token_iterator.max();
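The shortcut added above can be illustrated standalone: scan the tokens of the first statement (up to the next ';'), bail out on any lexer error before paying for full parsing, and skip the check for INSERT, whose FORMAT payload legitimately contains non-SQL bytes. A simplified sketch with stand-in types, not the ClickHouse API:

    #include <vector>

    enum class TokenKind { Word, Semicolon, Error, EndOfStream };

    /// Pre-scan the first statement for lexer errors so obviously broken queries
    /// fail fast instead of triggering expensive parser backtracking.
    bool hasEarlyLexicalError(const std::vector<TokenKind> & tokens, bool is_insert)
    {
        if (is_insert)
            return false;  // INSERT ... FORMAT may carry arbitrary trailing data

        for (TokenKind t : tokens)
        {
            if (t == TokenKind::Semicolon || t == TokenKind::EndOfStream)
                break;     // later statements are checked when their turn comes
            if (t == TokenKind::Error)
                return true;
        }
        return false;
    }

In the real code the scan reuses the token stream the parser will consume, which is why it must call tokens.reset() afterwards: the lookahead would otherwise inflate the max-position bookkeeping used for error reporting.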
@ -2,10 +2,25 @@
 #include <Processors/QueryPlan/FilterStep.h>
 #include <Processors/QueryPlan/ExpressionStep.h>
 #include <Interpreters/ActionsDAG.h>
+#include <Functions/FunctionsLogical.h>
+#include <Functions/IFunctionAdaptors.h>
 
 namespace DB::QueryPlanOptimizations
 {
 
+static void removeFromOutputs(ActionsDAG & dag, const ActionsDAG::Node & node)
+{
+    auto & outputs = dag.getOutputs();
+    for (size_t i = 0; i < outputs.size(); ++i)
+    {
+        if (&node == outputs[i])
+        {
+            outputs.erase(outputs.begin() + i);
+            return;
+        }
+    }
+}
+
 size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &)
 {
     if (parent_node->children.size() != 1)
@ -19,6 +34,7 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &)
     auto * parent_expr = typeid_cast<ExpressionStep *>(parent.get());
     auto * parent_filter = typeid_cast<FilterStep *>(parent.get());
     auto * child_expr = typeid_cast<ExpressionStep *>(child.get());
+    auto * child_filter = typeid_cast<FilterStep *>(child.get());
 
     if (parent_expr && child_expr)
     {
@ -60,6 +76,42 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &)
         parent_node->children.swap(child_node->children);
         return 1;
     }
+    else if (parent_filter && child_filter)
+    {
+        const auto & child_actions = child_filter->getExpression();
+        const auto & parent_actions = parent_filter->getExpression();
+
+        if (child_actions->hasArrayJoin())
+            return 0;
+
+        auto actions = child_actions->clone();
+        const auto & child_filter_node = actions->findInOutputs(child_filter->getFilterColumnName());
+        if (child_filter->removesFilterColumn())
+            removeFromOutputs(*actions, child_filter_node);
+
+        actions->mergeInplace(std::move(*parent_actions->clone()));
+
+        const auto & parent_filter_node = actions->findInOutputs(parent_filter->getFilterColumnName());
+        if (parent_filter->removesFilterColumn())
+            removeFromOutputs(*actions, parent_filter_node);
+
+        FunctionOverloadResolverPtr func_builder_and = std::make_unique<FunctionToOverloadResolverAdaptor>(std::make_shared<FunctionAnd>());
+        const auto & condition = actions->addFunction(func_builder_and, {&child_filter_node, &parent_filter_node}, {});
+        auto & outputs = actions->getOutputs();
+        outputs.insert(outputs.begin(), &condition);
+
+        actions->removeUnusedActions(false);
+
+        auto filter = std::make_unique<FilterStep>(child_filter->getInputStreams().front(),
+            actions,
+            condition.result_name,
+            true);
+        filter->setStepDescription("(" + parent_filter->getStepDescription() + " + " + child_filter->getStepDescription() + ")");
+
+        parent_node->step = std::move(filter);
+        parent_node->children.swap(child_node->children);
+        return 1;
+    }
 
     return 0;
 }
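Conceptually the new branch rewrites filter(parent) over filter(child) into a single filter whose predicate is child AND parent, gluing the two expression DAGs together and conjoining their filter columns. A toy stand-in for the same rewrite, using plain predicates rather than the ActionsDAG API:

    #include <functional>

    /// Toy version of the rewrite: two stacked filter steps collapse into one
    /// whose predicate is the conjunction. The child filter ran first in the plan,
    /// so it is evaluated first here as well.
    template <typename Row>
    std::function<bool(const Row &)> mergeFilters(std::function<bool(const Row &)> parent,
                                                  std::function<bool(const Row &)> child)
    {
        return [parent = std::move(parent), child = std::move(child)](const Row & row)
        {
            return child(row) && parent(row);
        };
    }

As in the hunk above, the real code must also handle columns that a filter removes after filtering: those are dropped from the merged DAG's outputs via removeFromOutputs before the AND node is added.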
Some files were not shown because too many files have changed in this diff