Mirror of https://github.com/ClickHouse/ClickHouse.git
Merge branch 'master' into analyzer-fix-test_mutations_with_merge_tree

This commit is contained in: 824ee3efc2

.github/workflows/master.yml (vendored, 47 changes)
@@ -97,6 +97,14 @@ jobs:
       build_name: package_release
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+  BuilderDebReleaseCoverage:
+    needs: [RunConfig, BuildDockers]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_release_coverage
+      checkout_depth: 0
+      data: ${{ needs.RunConfig.outputs.data }}
   BuilderDebAarch64:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
@@ -277,6 +285,7 @@ jobs:
       - BuilderDebDebug
       - BuilderDebMsan
       - BuilderDebRelease
+      - BuilderDebReleaseCoverage
       - BuilderDebTsan
       - BuilderDebUBsan
     uses: ./.github/workflows/reusable_test.yml
@@ -318,15 +327,19 @@ jobs:
       run_command: |
         python3 build_report_check.py "$CHECK_NAME"
   MarkReleaseReady:
-    needs: [RunConfig, BuilderBinDarwin, BuilderBinDarwinAarch64, BuilderDebRelease, BuilderDebAarch64]
-    if: ${{ !failure() && !cancelled() }}
-    uses: ./.github/workflows/reusable_test.yml
-    with:
-      test_name: Mark Commit Release Ready
-      runner_type: style-checker
-      data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 mark_release_ready.py
+    needs:
+      - BuilderBinDarwin
+      - BuilderBinDarwinAarch64
+      - BuilderDebRelease
+      - BuilderDebAarch64
+    runs-on: [self-hosted, style-checker]
+    steps:
+      - name: Check out repository code
+        uses: ClickHouse/checkout@v1
+      - name: Mark Commit Release Ready
+        run: |
+          cd "$GITHUB_WORKSPACE/tests/ci"
+          python3 mark_release_ready.py
 ############################################################################################
 #################################### INSTALL PACKAGES ######################################
 ############################################################################################
@@ -361,6 +374,14 @@ jobs:
       test_name: Stateless tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
+  FunctionalStatelessTestCoverage:
+    needs: [RunConfig, BuilderDebReleaseCoverage]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Stateless tests (coverage)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatelessTestReleaseDatabaseReplicated:
     needs: [RunConfig, BuilderDebRelease]
     if: ${{ !failure() && !cancelled() }}
@@ -461,6 +482,14 @@ jobs:
       test_name: Stateful tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
+  FunctionalStatefulTestCoverage:
+    needs: [RunConfig, BuilderDebReleaseCoverage]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Stateful tests (coverage)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatefulTestAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
.github/workflows/pull_request.yml (vendored, 25 changes)
@@ -147,6 +147,14 @@ jobs:
       build_name: package_release
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+  BuilderDebReleaseCoverage:
+    needs: [RunConfig, FastTest]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_build.yml
+    with:
+      build_name: package_release_coverage
+      checkout_depth: 0
+      data: ${{ needs.RunConfig.outputs.data }}
   BuilderDebAarch64:
     needs: [RunConfig, FastTest]
     if: ${{ !failure() && !cancelled() }}
@@ -309,6 +317,7 @@ jobs:
       - BuilderDebDebug
       - BuilderDebMsan
       - BuilderDebRelease
+      - BuilderDebReleaseCoverage
       - BuilderDebTsan
       - BuilderDebUBsan
     uses: ./.github/workflows/reusable_test.yml
@@ -382,6 +391,14 @@ jobs:
       test_name: Stateless tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
+  FunctionalStatelessTestCoverage:
+    needs: [RunConfig, BuilderDebReleaseCoverage]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Stateless tests (coverage)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatelessTestReleaseDatabaseReplicated:
     needs: [RunConfig, BuilderDebRelease]
     if: ${{ !failure() && !cancelled() }}
@@ -509,6 +526,14 @@ jobs:
       test_name: Stateful tests (release)
       runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
+  FunctionalStatefulTestCoverage:
+    needs: [RunConfig, BuilderDebReleaseCoverage]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Stateful tests (coverage)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
   FunctionalStatefulTestAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
.github/workflows/release_branches.yml (vendored, 20 changes)
@@ -91,6 +91,8 @@ jobs:
       build_name: package_release
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+      # always rebuild on release branches to be able to publish from any commit
+      force: true
   BuilderDebAarch64:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
@@ -99,6 +101,8 @@ jobs:
       build_name: package_aarch64
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+      # always rebuild on release branches to be able to publish from any commit
+      force: true
   BuilderDebAsan:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
@@ -142,6 +146,8 @@ jobs:
       build_name: binary_darwin
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+      # always rebuild on release branches to be able to publish from any commit
+      force: true
   BuilderBinDarwinAarch64:
     needs: [RunConfig, BuildDockers]
     if: ${{ !failure() && !cancelled() }}
@@ -150,6 +156,8 @@ jobs:
       build_name: binary_darwin_aarch64
       checkout_depth: 0
       data: ${{ needs.RunConfig.outputs.data }}
+      # always rebuild on release branches to be able to publish from any commit
+      force: true
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
@@ -206,13 +214,8 @@ jobs:
     if: ${{ !cancelled() }}
     needs:
       - RunConfig
-      - BuilderDebRelease
-      - BuilderDebAarch64
-      - BuilderDebAsan
-      - BuilderDebTsan
-      - BuilderDebUBsan
-      - BuilderDebMsan
-      - BuilderDebDebug
+      - BuilderBinDarwin
+      - BuilderBinDarwinAarch64
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: ClickHouse special build check
@@ -225,7 +228,6 @@ jobs:
       run_command: |
         python3 build_report_check.py "$CHECK_NAME"
   MarkReleaseReady:
-    if: ${{ !failure() && !cancelled() }}
     needs:
       - BuilderBinDarwin
       - BuilderBinDarwinAarch64
@@ -235,8 +237,6 @@ jobs:
     steps:
       - name: Check out repository code
         uses: ClickHouse/checkout@v1
-        with:
-          clear-repository: true
       - name: Mark Commit Release Ready
         run: |
           cd "$GITHUB_WORKSPACE/tests/ci"
.github/workflows/reusable_build.yml (vendored, 9 changes)
@@ -26,6 +26,10 @@ name: Build ClickHouse
        description: json ci data
        type: string
        required: true
+      force:
+        description: disallow job skipping
+        type: boolean
+        default: false
       additional_envs:
         description: additional ENV variables to setup the job
         type: string
@@ -33,7 +37,7 @@ name: Build ClickHouse
 jobs:
   Build:
     name: Build-${{inputs.build_name}}
-    if: contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name)
+    if: ${{ contains(fromJson(inputs.data).jobs_data.jobs_to_do, inputs.build_name) || inputs.force }}
     env:
       GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}}
     runs-on: [self-hosted, '${{inputs.runner_type}}']
@@ -78,7 +82,8 @@ jobs:
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
             --infile ${{ toJson(inputs.data) }} \
             --job-name "$BUILD_NAME" \
-            --run
+            --run \
+            ${{ inputs.force && '--force' || '' }}
       - name: Post
         # it still be build report to upload for failed build job
         if: ${{ !cancelled() }}
@@ -254,10 +254,17 @@ endif()
 
 include(cmake/cpu_features.cmake)
 
-# Asynchronous unwind tables are needed for Query Profiler.
-# They are already by default on some platforms but possibly not on all platforms.
-# Enable it explicitly.
-set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
+# Query Profiler doesn't work on MacOS for several reasons
+# - PHDR cache is not available
+# - We use native functionality to get stacktraces which is not async signal safe
+# and thus we don't need to generate asynchronous unwind tables
+if (NOT OS_DARWIN)
+    # Asynchronous unwind tables are needed for Query Profiler.
+    # They are already by default on some platforms but possibly not on all platforms.
+    # Enable it explicitly.
+    set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
+endif()
 
 # Reproducible builds.
 if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
@@ -348,7 +355,7 @@ if (COMPILER_CLANG)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
 
-    if (NOT ENABLE_TESTS AND NOT SANITIZE AND OS_LINUX)
+    if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
         # https://clang.llvm.org/docs/ThinLTO.html
         # Applies to clang and linux only.
         # Disabled when building with tests or sanitizers.
@@ -546,7 +553,7 @@ if (ENABLE_RUST)
     endif()
 endif()
 
-if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
+if (CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64))
     set(CHECK_LARGE_OBJECT_SIZES_DEFAULT ON)
 else ()
     set(CHECK_LARGE_OBJECT_SIZES_DEFAULT OFF)
@@ -1,4 +1,5 @@
 #include "coverage.h"
+#include <sys/mman.h>
 
 #pragma GCC diagnostic ignored "-Wreserved-identifier"
 
@@ -52,11 +53,21 @@ namespace
 uint32_t * guards_start = nullptr;
 uint32_t * guards_end = nullptr;
 
-uintptr_t * coverage_array = nullptr;
+uintptr_t * current_coverage_array = nullptr;
+uintptr_t * cumulative_coverage_array = nullptr;
 size_t coverage_array_size = 0;
 
 uintptr_t * all_addresses_array = nullptr;
 size_t all_addresses_array_size = 0;
 
+uintptr_t * allocate(size_t size)
+{
+    /// Note: mmap return zero-initialized memory, and we count on that.
+    void * map = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (MAP_FAILED == map)
+        return nullptr;
+    return static_cast<uintptr_t*>(map);
+}
 }
 
 extern "C"
@@ -79,7 +90,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t * start, uint32_t * stop)
     coverage_array_size = stop - start;
 
     /// Note: we will leak this.
-    coverage_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
+    current_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
+    cumulative_coverage_array = allocate(sizeof(uintptr_t) * coverage_array_size);
 
     resetCoverage();
 }
@@ -92,8 +104,8 @@ void __sanitizer_cov_pcs_init(const uintptr_t * pcs_begin, const uintptr_t * pcs
         return;
     pc_table_initialized = true;
 
-    all_addresses_array = static_cast<uintptr_t*>(malloc(sizeof(uintptr_t) * coverage_array_size));
     all_addresses_array_size = pcs_end - pcs_begin;
+    all_addresses_array = allocate(sizeof(uintptr_t) * all_addresses_array_size);
 
     /// They are not a real pointers, but also contain a flag in the most significant bit,
     /// in which we are not interested for now. Reset it.
@@ -115,17 +127,24 @@ void __sanitizer_cov_trace_pc_guard(uint32_t * guard)
     /// The values of `*guard` are as you set them in
     /// __sanitizer_cov_trace_pc_guard_init and so you can make them consecutive
     /// and use them to dereference an array or a bit vector.
-    void * pc = __builtin_return_address(0);
+    intptr_t pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
 
-    coverage_array[guard - guards_start] = reinterpret_cast<uintptr_t>(pc);
+    current_coverage_array[guard - guards_start] = pc;
+    cumulative_coverage_array[guard - guards_start] = pc;
 }
 
 }
 
 __attribute__((no_sanitize("coverage")))
-std::span<const uintptr_t> getCoverage()
+std::span<const uintptr_t> getCurrentCoverage()
 {
-    return {coverage_array, coverage_array_size};
+    return {current_coverage_array, coverage_array_size};
+}
+
+__attribute__((no_sanitize("coverage")))
+std::span<const uintptr_t> getCumulativeCoverage()
+{
+    return {cumulative_coverage_array, coverage_array_size};
 }
 
 __attribute__((no_sanitize("coverage")))
@@ -137,7 +156,7 @@ std::span<const uintptr_t> getAllInstrumentedAddresses()
 __attribute__((no_sanitize("coverage")))
 void resetCoverage()
 {
-    memset(coverage_array, 0, coverage_array_size * sizeof(*coverage_array));
+    memset(current_coverage_array, 0, coverage_array_size * sizeof(*current_coverage_array));
 
     /// The guard defines whether the __sanitizer_cov_trace_pc_guard should be called.
     /// For example, you can unset it after first invocation to prevent excessive work.
@@ -15,7 +15,10 @@ void dumpCoverageReportIfPossible();
 /// Get accumulated unique program addresses of the instrumented parts of the code,
 /// seen so far after program startup or after previous reset.
 /// The returned span will be represented as a sparse map, containing mostly zeros, which you should filter away.
-std::span<const uintptr_t> getCoverage();
+std::span<const uintptr_t> getCurrentCoverage();
+
+/// Similar but not being reset.
+std::span<const uintptr_t> getCumulativeCoverage();
 
 /// Get all instrumented addresses that could be in the coverage.
 std::span<const uintptr_t> getAllInstrumentedAddresses();
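A minimal sketch (not part of this commit) of how a test driver might consume the coverage API declared above. It assumes a build configured with -DSANITIZE_COVERAGE=1 so that the callbacks in the coverage translation unit are active; the helper name dumpHitAddresses is hypothetical:

    #include <cstdint>
    #include <cstdio>
    #include <span>

    #include "coverage.h"

    /// Hypothetical helper: print the non-zero entries of a sparse coverage span.
    static size_t dumpHitAddresses(std::span<const uintptr_t> coverage)
    {
        size_t hits = 0;
        for (uintptr_t address : coverage)
        {
            if (address == 0)
                continue; /// the span is a sparse map, zeros mean "instrumented but not hit"
            ++hits;
            std::printf("%p\n", reinterpret_cast<void *>(address));
        }
        return hits;
    }

    int main()
    {
        resetCoverage(); /// clears only the "current" array and re-arms the guards
        /// ... run one test here ...
        size_t current = dumpHitAddresses(getCurrentCoverage());
        size_t total = dumpHitAddresses(getCumulativeCoverage()); /// never reset
        std::printf("%zu addresses hit in this test, %zu since startup\n", current, total);
        return 0;
    }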
@@ -22,6 +22,7 @@
 #include <cstddef>
 #include <map>
 #include <vector>
 
 #include "Poco/Channel.h"
 #include "Poco/Format.h"
 #include "Poco/Foundation.h"
@@ -871,21 +872,11 @@ public:
     /// If the Logger does not yet exist, it is created, based
     /// on its parent logger.
 
-    static LoggerPtr getShared(const std::string & name);
+    static LoggerPtr getShared(const std::string & name, bool should_be_owned_by_shared_ptr_if_created = true);
     /// Returns a shared pointer to the Logger with the given name.
     /// If the Logger does not yet exist, it is created, based
     /// on its parent logger.
 
-    static Logger & unsafeGet(const std::string & name);
-    /// Returns a reference to the Logger with the given name.
-    /// If the Logger does not yet exist, it is created, based
-    /// on its parent logger.
-    ///
-    /// WARNING: This method is not thread safe. You should
-    /// probably use get() instead.
-    /// The only time this method should be used is during
-    /// program initialization, when only one thread is running.
-
     static Logger & create(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
     /// Creates and returns a reference to a Logger with the
     /// given name. The Logger's Channel and log level as set as
@@ -932,6 +923,16 @@ public:
 
     static const std::string ROOT; /// The name of the root logger ("").
 
+public:
+    struct LoggerEntry
+    {
+        Poco::Logger * logger;
+        bool owned_by_shared_ptr = false;
+    };
+
+    using LoggerMap = std::unordered_map<std::string, LoggerEntry>;
+    using LoggerMapIterator = LoggerMap::iterator;
+
 protected:
     Logger(const std::string & name, Channel * pChannel, int level);
     ~Logger();
@@ -940,12 +941,16 @@ protected:
     void log(const std::string & text, Message::Priority prio, const char * file, int line);
 
     static std::string format(const std::string & fmt, int argc, std::string argv[]);
-    static Logger & unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
-    static Logger & parent(const std::string & name);
-    static void add(Logger * pLogger);
-    static Logger * find(const std::string & name);
 
 private:
+    static std::pair<Logger::LoggerMapIterator, bool> unsafeGet(const std::string & name, bool get_shared);
+    static Logger * unsafeGetRawPtr(const std::string & name);
+    static std::pair<LoggerMapIterator, bool> unsafeCreate(const std::string & name, Channel * pChannel, int level = Message::PRIO_INFORMATION);
+    static Logger & parent(const std::string & name);
+    static std::pair<LoggerMapIterator, bool> add(Logger * pLogger);
+    static std::optional<LoggerMapIterator> find(const std::string & name);
+    static Logger * findRawPtr(const std::string & name);
+
     Logger();
     Logger(const Logger &);
     Logger & operator=(const Logger &);
@@ -38,14 +38,7 @@ std::mutex & getLoggerMutex()
     return *logger_mutex;
 }
 
-struct LoggerEntry
-{
-    Poco::Logger * logger;
-    bool owned_by_shared_ptr = false;
-};
-
-using LoggerMap = std::unordered_map<std::string, LoggerEntry>;
-LoggerMap * _pLoggerMap = nullptr;
+Poco::Logger::LoggerMap * _pLoggerMap = nullptr;
 
 }
 
@@ -337,10 +330,12 @@ struct LoggerDeleter
     }
 };
 
-inline LoggerPtr makeLoggerPtr(Logger & logger)
+inline LoggerPtr makeLoggerPtr(Logger & logger, bool owned_by_shared_ptr)
 {
-    return std::shared_ptr<Logger>(&logger, LoggerDeleter());
+    if (owned_by_shared_ptr)
+        return LoggerPtr(&logger, LoggerDeleter());
+
+    return LoggerPtr(std::shared_ptr<void>{}, &logger);
 }
 
 }
@@ -350,64 +345,67 @@ Logger& Logger::get(const std::string& name)
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
 
-    Logger & logger = unsafeGet(name);
-
-    /** If there are already shared pointer created for this logger
-      * we need to increment Logger reference count and now logger
-      * is owned by logger infrastructure.
-      */
-    auto it = _pLoggerMap->find(name);
-    if (it->second.owned_by_shared_ptr)
-    {
-        it->second.logger->duplicate();
-        it->second.owned_by_shared_ptr = false;
-    }
-
-    return logger;
+    auto [it, inserted] = unsafeGet(name, false /*get_shared*/);
+    return *it->second.logger;
 }
 
 
-LoggerPtr Logger::getShared(const std::string & name)
+LoggerPtr Logger::getShared(const std::string & name, bool should_be_owned_by_shared_ptr_if_created)
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
-    bool logger_exists = _pLoggerMap && _pLoggerMap->contains(name);
+    auto [it, inserted] = unsafeGet(name, true /*get_shared*/);
 
-    Logger & logger = unsafeGet(name);
-
-    /** If logger already exists, then this shared pointer does not own it.
-      * If logger does not exists, logger infrastructure could be already destroyed
-      * or logger was created.
+    /** If during `unsafeGet` logger was created, then this shared pointer owns it.
+      * If logger was already created, then this shared pointer does not own it.
      */
-    if (logger_exists)
-    {
-        logger.duplicate();
-    }
-    else if (_pLoggerMap)
-    {
-        _pLoggerMap->find(name)->second.owned_by_shared_ptr = true;
-    }
+    if (inserted && should_be_owned_by_shared_ptr_if_created)
+        it->second.owned_by_shared_ptr = true;
 
-    return makeLoggerPtr(logger);
+    return makeLoggerPtr(*it->second.logger, it->second.owned_by_shared_ptr);
 }
 
 
-Logger& Logger::unsafeGet(const std::string& name)
+std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeGet(const std::string& name, bool get_shared)
 {
-    Logger* pLogger = find(name);
-    if (!pLogger)
+    std::optional<Logger::LoggerMapIterator> optional_logger_it = find(name);
+
+    if (optional_logger_it)
     {
+        auto & logger_it = *optional_logger_it;
+
+        if (logger_it->second.owned_by_shared_ptr)
+        {
+            logger_it->second.logger->duplicate();
+
+            if (!get_shared)
+                logger_it->second.owned_by_shared_ptr = false;
+        }
+    }
+
+    if (!optional_logger_it)
+    {
+        Logger * logger = nullptr;
+
         if (name == ROOT)
         {
-            pLogger = new Logger(name, 0, Message::PRIO_INFORMATION);
+            logger = new Logger(name, nullptr, Message::PRIO_INFORMATION);
        }
         else
         {
             Logger& par = parent(name);
-            pLogger = new Logger(name, par.getChannel(), par.getLevel());
+            logger = new Logger(name, par.getChannel(), par.getLevel());
         }
-        add(pLogger);
+
+        return add(logger);
     }
-    return *pLogger;
+
+    return std::make_pair(*optional_logger_it, false);
+}
+
+
+Logger * Logger::unsafeGetRawPtr(const std::string & name)
+{
+    return unsafeGet(name, false /*get_shared*/).first->second.logger;
 }
 
 
@@ -415,24 +413,24 @@ Logger& Logger::create(const std::string& name, Channel* pChannel, int level)
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
 
-    return unsafeCreate(name, pChannel, level);
+    return *unsafeCreate(name, pChannel, level).first->second.logger;
 }
 
 LoggerPtr Logger::createShared(const std::string & name, Channel * pChannel, int level)
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
 
-    Logger & logger = unsafeCreate(name, pChannel, level);
-    _pLoggerMap->find(name)->second.owned_by_shared_ptr = true;
+    auto [it, inserted] = unsafeCreate(name, pChannel, level);
+    it->second.owned_by_shared_ptr = true;
 
-    return makeLoggerPtr(logger);
+    return makeLoggerPtr(*it->second.logger, it->second.owned_by_shared_ptr);
 }
 
 Logger& Logger::root()
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
 
-    return unsafeGet(ROOT);
+    return *unsafeGetRawPtr(ROOT);
 }
 
 
@@ -440,7 +438,11 @@ Logger* Logger::has(const std::string& name)
 {
     std::lock_guard<std::mutex> lock(getLoggerMutex());
 
-    return find(name);
+    auto optional_it = find(name);
+    if (!optional_it)
+        return nullptr;
+
+    return (*optional_it)->second.logger;
 }
 
 
@@ -459,20 +461,32 @@ void Logger::shutdown()
         }
 
         delete _pLoggerMap;
-        _pLoggerMap = 0;
+        _pLoggerMap = nullptr;
     }
 }
 
 
-Logger* Logger::find(const std::string& name)
+std::optional<Logger::LoggerMapIterator> Logger::find(const std::string& name)
 {
     if (_pLoggerMap)
     {
         LoggerMap::iterator it = _pLoggerMap->find(name);
         if (it != _pLoggerMap->end())
-            return it->second.logger;
+            return it;
+
+        return {};
     }
-    return 0;
+
+    return {};
+}
+
+
+Logger * Logger::findRawPtr(const std::string & name)
+{
+    auto optional_it = find(name);
+    if (!optional_it)
+        return nullptr;
+
+    return (*optional_it)->second.logger;
 }
 
 
@@ -490,28 +504,28 @@ void Logger::names(std::vector<std::string>& names)
     }
 }
 
-Logger& Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level)
+std::pair<Logger::LoggerMapIterator, bool> Logger::unsafeCreate(const std::string & name, Channel * pChannel, int level)
 {
     if (find(name)) throw ExistsException();
     Logger* pLogger = new Logger(name, pChannel, level);
-    add(pLogger);
-
-    return *pLogger;
+    return add(pLogger);
 }
 
 
 Logger& Logger::parent(const std::string& name)
 {
     std::string::size_type pos = name.rfind('.');
     if (pos != std::string::npos)
     {
         std::string pname = name.substr(0, pos);
-        Logger* pParent = find(pname);
+        Logger* pParent = findRawPtr(pname);
         if (pParent)
             return *pParent;
         else
             return parent(pname);
     }
-    else return unsafeGet(ROOT);
+    else return *unsafeGetRawPtr(ROOT);
 }
 
 
@@ -579,12 +593,14 @@ namespace
     }
 }
 
-void Logger::add(Logger* pLogger)
+std::pair<Logger::LoggerMapIterator, bool> Logger::add(Logger* pLogger)
 {
     if (!_pLoggerMap)
-        _pLoggerMap = new LoggerMap;
+        _pLoggerMap = new Logger::LoggerMap;
 
-    _pLoggerMap->emplace(pLogger->name(), LoggerEntry{pLogger, false /*owned_by_shared_ptr*/});
+    auto result = _pLoggerMap->emplace(pLogger->name(), LoggerEntry{pLogger, false /*owned_by_shared_ptr*/});
+    assert(result.second);
+    return result;
 }
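As a reading aid (not from the commit), a sketch of the ownership contract that the getShared/get/makeLoggerPtr changes above implement; it assumes the patched headers where LoggerPtr is a std::shared_ptr<Logger> alias in the Poco namespace:

    #include "Poco/Logger.h"

    int main()
    {
        /// First getShared() creates the logger; the map entry is marked
        /// owned_by_shared_ptr, so makeLoggerPtr attaches LoggerDeleter.
        Poco::LoggerPtr shared = Poco::Logger::getShared("my.logger");

        /// A later get() for the same name goes through unsafeGet with
        /// get_shared = false: it duplicates the reference count and flips
        /// owned_by_shared_ptr back to false, so the raw reference stays
        /// valid even after all shared pointers are gone.
        Poco::Logger & raw = Poco::Logger::get("my.logger");

        /// Another getShared() now takes the non-owning branch of
        /// makeLoggerPtr (the aliasing shared_ptr constructor).
        Poco::LoggerPtr another = Poco::Logger::getShared("my.logger");

        raw.information("still alive");
        return 0;
    }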
@@ -63,14 +63,14 @@ endif()
 option(WITH_COVERAGE "Instrumentation for code coverage with default implementation" OFF)
 
 if (WITH_COVERAGE)
-    message (INFORMATION "Enabled instrumentation for code coverage")
+    message (STATUS "Enabled instrumentation for code coverage")
     set(COVERAGE_FLAGS "-fprofile-instr-generate -fcoverage-mapping")
 endif()
 
 option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF)
 
 if (SANITIZE_COVERAGE)
-    message (INFORMATION "Enabled instrumentation for code coverage")
+    message (STATUS "Enabled instrumentation for code coverage")
 
     # We set this define for whole build to indicate that at least some parts are compiled with coverage.
     # And to expose it in system.build_options.
@@ -1,6 +1,10 @@
 #include <libunwind.h>
 
+/// On MacOS this function will be replaced with a dynamic symbol
+/// from the system library.
+#if !defined(OS_DARWIN)
 int backtrace(void ** buffer, int size)
 {
     return unw_backtrace(buffer, size);
 }
+#endif
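For reference, a tiny caller of this shim (not part of the commit); it declares the usual execinfo-style prototype that the libunwind-based definition above satisfies:

    #include <cstdio>

    extern "C" int backtrace(void ** buffer, int size); /// resolved to the shim above on non-Darwin

    int main()
    {
        void * frames[64];
        int count = backtrace(frames, 64); /// fills `frames` with up to 64 return addresses
        for (int i = 0; i < count; ++i)
            std::printf("#%d %p\n", i, frames[i]);
        return 0;
    }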
contrib/llvm-project (vendored, 2 changes)

@@ -1 +1 @@
-Subproject commit 2568a7cd1297c7c3044b0f3cc0c23a6f6444d856
+Subproject commit d2142eed98046a47ff7112e3cc1e197c8a5cd80f
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.1.2.5"
+ARG VERSION="24.1.3.31"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -115,12 +115,17 @@ def run_docker_image_with_env(
     subprocess.check_call(cmd, shell=True)
 
 
-def is_release_build(debug_build: bool, package_type: str, sanitizer: str) -> bool:
-    return not debug_build and package_type == "deb" and sanitizer == ""
+def is_release_build(
+    debug_build: bool, package_type: str, sanitizer: str, coverage: bool
+) -> bool:
+    return (
+        not debug_build and package_type == "deb" and sanitizer == "" and not coverage
+    )
 
 
 def parse_env_variables(
     debug_build: bool,
+    coverage: bool,
     compiler: str,
     sanitizer: str,
     package_type: str,
@@ -261,7 +266,7 @@ def parse_env_variables(
         build_target = (
             f"{build_target} clickhouse-odbc-bridge clickhouse-library-bridge"
         )
-    if is_release_build(debug_build, package_type, sanitizer):
+    if is_release_build(debug_build, package_type, sanitizer, coverage):
         cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
         result.append("WITH_PERFORMANCE=1")
         if is_cross_arm:
@@ -287,6 +292,9 @@ def parse_env_variables(
     else:
         result.append("BUILD_TYPE=None")
 
+    if coverage:
+        cmake_flags.append("-DSANITIZE_COVERAGE=1 -DBUILD_STANDALONE_KEEPER=0")
+
     if not cache:
         cmake_flags.append("-DCOMPILER_CACHE=disabled")
 
@@ -415,6 +423,11 @@ def parse_args() -> argparse.Namespace:
         choices=("address", "thread", "memory", "undefined", ""),
         default="",
     )
+    parser.add_argument(
+        "--coverage",
+        action="store_true",
+        help="enable granular coverage with introspection",
+    )
 
     parser.add_argument("--clang-tidy", action="store_true")
     parser.add_argument(
@@ -507,6 +520,7 @@ def main() -> None:
 
     env_prepared = parse_env_variables(
         args.debug_build,
+        args.coverage,
         args.compiler,
         args.sanitizer,
         args.package_type,
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.1.2.5"
+ARG VERSION="24.1.3.31"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -27,7 +27,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.1.2.5"
+ARG VERSION="24.1.3.31"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -118,13 +118,19 @@ if [ -n "$CLICKHOUSE_USER" ] && [ "$CLICKHOUSE_USER" != "default" ] || [ -n "$CL
 EOT
 fi
 
+CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS="${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS:-}"
+
 # checking $DATA_DIR for initialization
 if [ -d "${DATA_DIR%/}/data" ]; then
     DATABASE_ALREADY_EXISTS='true'
 fi
 
-# only run initialization on an empty data directory
-if [ -z "${DATABASE_ALREADY_EXISTS}" ]; then
+# run initialization if flag CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS is not empty or data directory is empty
+if [[ -n "${CLICKHOUSE_ALWAYS_RUN_INITDB_SCRIPTS}" || -z "${DATABASE_ALREADY_EXISTS}" ]]; then
+    RUN_INITDB_SCRIPTS='true'
+fi
+
+if [ -z "${RUN_INITDB_SCRIPTS}" ]; then
     if [ -n "$(ls /docker-entrypoint-initdb.d/)" ] || [ -n "$CLICKHOUSE_DB" ]; then
         # port is needed to check if clickhouse-server is ready for connections
         HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port --try)"
@@ -17,16 +17,20 @@ CLICKHOUSE_CI_LOGS_CLUSTER=${CLICKHOUSE_CI_LOGS_CLUSTER:-system_logs_export}
 
 EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime('UTC'), check_name LowCardinality(String), instance_type LowCardinality(String), instance_id String, INDEX ix_pr (pull_request_number) TYPE set(100), INDEX ix_commit (commit_sha) TYPE set(100), INDEX ix_check_time (check_start_time) TYPE minmax, "}
 EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"CAST(0 AS UInt32) AS pull_request_number, '' AS commit_sha, now() AS check_start_time, toLowCardinality('') AS check_name, toLowCardinality('') AS instance_type, '' AS instance_id"}
-EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}
+EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name"}
 
 # trace_log needs more columns for symbolization
 EXTRA_COLUMNS_TRACE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), lines Array(LowCardinality(String)), "
 EXTRA_COLUMNS_EXPRESSION_TRACE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), trace)::Array(LowCardinality(String)) AS symbols, arrayMap(x -> addressToLine(x), trace)::Array(LowCardinality(String)) AS lines"
 
+# coverage_log needs more columns for symbolization, but only symbol names (the line numbers are too heavy to calculate)
+EXTRA_COLUMNS_COVERAGE_LOG="${EXTRA_COLUMNS} symbols Array(LowCardinality(String)), "
+EXTRA_COLUMNS_EXPRESSION_COVERAGE_LOG="${EXTRA_COLUMNS_EXPRESSION}, arrayMap(x -> demangle(addressToSymbol(x)), coverage)::Array(LowCardinality(String)) AS symbols"
+
 
 function __set_connection_args
 {
-    # It's impossible to use generous $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
+    # It's impossible to use a generic $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
     # That's why we must stick to the generated option
     CONNECTION_ARGS=(
         --receive_timeout=45 --send_timeout=45 --secure
@@ -129,6 +133,19 @@ function setup_logs_replication
     debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'")
     echo "Build is debug or sanitizer: $debug_or_sanitizer_build"
 
+    # We will pre-create a table system.coverage_log.
+    # It is normally created by clickhouse-test rather than the server,
+    # so we will create it in advance to make it be picked up by the next commands:
+
+    clickhouse-client --query "
+        CREATE TABLE IF NOT EXISTS system.coverage_log
+        (
+            time DateTime COMMENT 'The time of test run',
+            test_name String COMMENT 'The name of the test',
+            coverage Array(UInt64) COMMENT 'An array of addresses of the code (a subset of addresses instrumented for coverage) that were encountered during the test run'
+        ) ENGINE = Null COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster'
+    "
+
     # For each system log table:
     echo 'Create %_log tables'
     clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
@@ -139,11 +156,16 @@ function setup_logs_replication
             # Do not try to resolve stack traces in case of debug/sanitizers
             # build, since it is too slow (flushing of trace_log can take ~1min
             # with such MV attached)
-            if [[ "$debug_or_sanitizer_build" = 1 ]]; then
+            if [[ "$debug_or_sanitizer_build" = 1 ]]
+            then
                 EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
             else
                 EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_TRACE_LOG}"
             fi
+        elif [[ "$table" = "coverage_log" ]]
+        then
+            EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS_COVERAGE_LOG}"
+            EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION_COVERAGE_LOG}"
         else
             EXTRA_COLUMNS_FOR_TABLE="${EXTRA_COLUMNS}"
             EXTRA_COLUMNS_EXPRESSION_FOR_TABLE="${EXTRA_COLUMNS_EXPRESSION}"
@@ -160,7 +182,7 @@ function setup_logs_replication
         # Create the destination table with adapted name and structure:
         statement=$(clickhouse-client --format TSVRaw --query "SHOW CREATE TABLE system.${table}" | sed -r -e '
             s/^\($/('"$EXTRA_COLUMNS_FOR_TABLE"'/;
-            s/ORDER BY \(/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"'/;
+            s/^ORDER BY (([^\(].+?)|\((.+?)\))$/ORDER BY ('"$EXTRA_ORDER_BY_COLUMNS"', \2\3)/;
             s/^CREATE TABLE system\.\w+_log$/CREATE TABLE IF NOT EXISTS '"$table"'_'"$hash"'/;
             /^TTL /d
             ')
@@ -62,46 +62,47 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
 # kazoo 2.10.0 is broken
 # https://s3.amazonaws.com/clickhouse-test-reports/59337/524625a1d2f4cc608a3f1059e3df2c30f353a649/integration_tests__asan__analyzer__[5_6].html
 RUN python3 -m pip install --no-cache-dir \
-    PyMySQL \
     aerospike==11.1.0 \
-    asyncio \
+    PyMySQL==1.1.0 \
+    asyncio==3.4.3 \
     avro==1.10.2 \
-    azure-storage-blob \
-    boto3 \
-    cassandra-driver \
+    azure-storage-blob==12.19.0 \
+    boto3==1.34.24 \
+    cassandra-driver==3.29.0 \
     confluent-kafka==2.3.0 \
     delta-spark==2.3.0 \
-    dict2xml \
-    dicttoxml \
+    dict2xml==1.7.4 \
+    dicttoxml==1.7.16 \
     docker==6.1.3 \
     docker-compose==1.29.2 \
-    grpcio \
-    grpcio-tools \
-    kafka-python \
+    grpcio==1.60.0 \
+    grpcio-tools==1.60.0 \
+    kafka-python==2.0.2 \
+    lz4==4.3.3 \
+    minio==7.2.3 \
+    nats-py==2.6.0 \
+    protobuf==4.25.2 \
     kazoo==2.9.0 \
-    lz4 \
-    minio \
-    nats-py \
-    protobuf \
     psycopg2-binary==2.9.6 \
-    pyhdfs \
+    pyhdfs==0.3.1 \
     pymongo==3.11.0 \
     pyspark==3.3.2 \
-    pytest \
+    pytest==7.4.4 \
     pytest-order==1.0.0 \
-    pytest-random \
-    pytest-repeat \
-    pytest-timeout \
-    pytest-xdist \
-    pytz \
+    pytest-random==0.2 \
+    pytest-repeat==0.9.3 \
+    pytest-timeout==2.2.0 \
+    pytest-xdist==3.5.0 \
+    pytest-reportlog==0.4.0 \
+    pytz==2023.3.post1 \
     pyyaml==5.3.1 \
-    redis \
-    requests-kerberos \
+    redis==5.0.1 \
+    requests-kerberos==0.14.0 \
     tzlocal==2.1 \
-    retry \
-    bs4 \
-    lxml \
-    urllib3
+    retry==0.9.2 \
+    bs4==0.0.2 \
+    lxml==5.1.0 \
+    urllib3==2.0.7
 # bs4, lxml are for cloud tests, do not delete
 
 # Hudi supports only spark 3.3.*, not 3.4
@@ -74,7 +74,6 @@ RUN arch=${TARGETARCH:-amd64} \
     && wget "https://dl.min.io/client/mc/release/linux-${arch}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \
     && chmod +x ./mc ./minio
 
-
 RUN wget --no-verbose 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
     && tar -xvf hadoop-3.3.1.tar.gz \
     && rm -rf hadoop-3.3.1.tar.gz
@@ -185,11 +185,15 @@ function run_tests()
 
     if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--replicated-database')
+        # Too many tests fail for DatabaseReplicated in parallel.
         ADDITIONAL_OPTIONS+=('--jobs')
         ADDITIONAL_OPTIONS+=('2')
+    elif [[ 1 == $(clickhouse-client --query "SELECT value LIKE '%SANITIZE_COVERAGE%' FROM system.build_options WHERE name = 'CXX_FLAGS'") ]]; then
+        # Coverage on a per-test basis could only be collected sequentially.
+        # Do not set the --jobs parameter.
+        echo "Running tests with coverage collection."
     else
-        # Too many tests fail for DatabaseReplicated in parallel. All other
-        # configurations are OK.
+        # All other configurations are OK.
         ADDITIONAL_OPTIONS+=('--jobs')
         ADDITIONAL_OPTIONS+=('8')
     fi
|
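The build-type probe in the new `elif` branch above is an ordinary query and can be run by hand; here it is standalone, exactly as the script issues it:

```sql
-- Returns 1 on a coverage-instrumented build, 0 otherwise
SELECT value LIKE '%SANITIZE_COVERAGE%'
FROM system.build_options
WHERE name = 'CXX_FLAGS';
```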
@@ -5,7 +5,6 @@ FROM ubuntu:22.04
ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

-# 15.0.2
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=17

RUN apt-get update \
@@ -30,8 +29,7 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-# Install cmake 3.20+ for rust support
+# Install cmake 3.20+ for Rust support
# Used https://askubuntu.com/a/1157132 as reference
RUN curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | \
    gpg --dearmor - > /etc/apt/trusted.gpg.d/kitware.gpg && \
@@ -65,8 +63,7 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

-# This symlink required by gcc to find lld compiler
+# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# for external_symbolizer_path
RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
@@ -111,5 +108,4 @@ RUN arch=${TARGETARCH:-amd64} \
    && mv "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl/sccache" /usr/bin \
    && rm "/tmp/sccache-$SCCACHE_VERSION-$rarch-unknown-linux-musl" -r

COPY process_functional_tests_result.py /
docs/changelogs/v23.12.4.15-stable.md (new file, 21 lines)
@@ -0,0 +1,21 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v23.12.4.15-stable (4233d111d20) FIXME as compared to v23.12.3.40-stable (a594704ae75)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix incorrect result of arrayElement / map[] on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Fix distributed table with a constant sharding key [#59606](https://github.com/ClickHouse/ClickHouse/pull/59606) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).
docs/changelogs/v24.1.3.31-stable.md (new file, 34 lines)
@@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.1.3.31-stable (135b08cbd28) FIXME as compared to v24.1.2.5-stable (b2605dd4a5a)

#### Improvement

* Backported in [#59569](https://github.com/ClickHouse/ClickHouse/issues/59569): Now dashboard understands both compressed and uncompressed state of URL's #hash (backward compatibility). Continuation of [#59124](https://github.com/ClickHouse/ClickHouse/issues/59124). [#59548](https://github.com/ClickHouse/ClickHouse/pull/59548) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#59776](https://github.com/ClickHouse/ClickHouse/issues/59776): Added settings `split_parts_ranges_into_intersecting_and_non_intersecting_final` and `split_intersecting_parts_ranges_into_layers_final`. These settings are needed to disable optimizations for queries with `FINAL` and are intended for debugging only. [#59705](https://github.com/ClickHouse/ClickHouse/pull/59705) ([Maksim Kita](https://github.com/kitaisreal)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix `ASTAlterCommand::formatImpl` in case of column specific settings… [#59445](https://github.com/ClickHouse/ClickHouse/pull/59445) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Make MAX use the same rules as permutation for complex types [#59498](https://github.com/ClickHouse/ClickHouse/pull/59498) ([Raúl Marín](https://github.com/Algunenano)).
* Fix corner case when passing `update_insert_deduplication_token_in_dependent_materialized_views` [#59544](https://github.com/ClickHouse/ClickHouse/pull/59544) ([Jordi Villar](https://github.com/jrdi)).
* Fix incorrect result of arrayElement / map[] on empty value [#59594](https://github.com/ClickHouse/ClickHouse/pull/59594) ([Raúl Marín](https://github.com/Algunenano)).
* Fix crash in topK when merging empty states [#59603](https://github.com/ClickHouse/ClickHouse/pull/59603) ([Raúl Marín](https://github.com/Algunenano)).
* Maintain function alias in RewriteSumFunctionWithSumAndCountVisitor [#59658](https://github.com/ClickHouse/ClickHouse/pull/59658) ([Raúl Marín](https://github.com/Algunenano)).
* Fix leftPad / rightPad function with FixedString input [#59739](https://github.com/ClickHouse/ClickHouse/pull/59739) ([Raúl Marín](https://github.com/Algunenano)).

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Backport [#59650](https://github.com/ClickHouse/ClickHouse/issues/59650) to 24.1: MergeTree FINAL optimization diagnostics and settings"'. [#59701](https://github.com/ClickHouse/ClickHouse/pull/59701) ([Raúl Marín](https://github.com/Algunenano)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix 02720_row_policy_column_with_dots [#59453](https://github.com/ClickHouse/ClickHouse/pull/59453) ([Duc Canh Le](https://github.com/canhld94)).
* Refactoring of dashboard state encoding [#59554](https://github.com/ClickHouse/ClickHouse/pull/59554) ([Sergei Trifonov](https://github.com/serxa)).
* MergeTree FINAL optimization diagnostics and settings [#59650](https://github.com/ClickHouse/ClickHouse/pull/59650) ([Maksim Kita](https://github.com/kitaisreal)).
* Pin python dependencies in stateless tests [#59663](https://github.com/ClickHouse/ClickHouse/pull/59663) ([Raúl Marín](https://github.com/Algunenano)).
@@ -6,6 +6,12 @@ sidebar_label: Memory

# Memory Table Engine

+:::note
+When using the Memory table engine on ClickHouse Cloud, data is not replicated across all nodes (by design). To guarantee that all queries are routed to the same node and that the Memory table engine works as expected, you can do one of the following:
+- Execute all operations in the same session
+- Use a client that uses TCP or the native interface (which enables support for sticky connections) such as [clickhouse-client](/en/interfaces/cli)
+:::
+
The Memory engine stores data in RAM, in uncompressed form. Data is stored in exactly the same form as it is received when read. In other words, reading from this table is completely free.
Concurrent data access is synchronized. Locks are short: read and write operations do not block each other.
Indexes are not supported. Reading is parallelized.
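A minimal sketch of the engine in use (the table name is hypothetical); on Cloud, per the note above, run all three statements in the same session:

```sql
CREATE TABLE mem_example (id UInt64, payload String) ENGINE = Memory;
INSERT INTO mem_example VALUES (1, 'a'), (2, 'b');
SELECT * FROM mem_example;  -- served straight from RAM, no decompression
```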
@@ -451,3 +451,24 @@ To disallow concurrent backup/restore, you can use these settings respectively.

The default value for both is true, so by default concurrent backup/restores are allowed.
When these settings are false on a cluster, only 1 backup/restore is allowed to run on a cluster at a time.
+
+## Configuring BACKUP/RESTORE to use an AzureBlobStorage Endpoint
+
+To write backups to an AzureBlobStorage container you need the following pieces of information:
+- AzureBlobStorage endpoint connection string / url,
+- Container,
+- Path,
+- Account name (if url is specified)
+- Account Key (if url is specified)
+
+The destination for a backup will be specified like this:
+```
+AzureBlobStorage('<connection string>/<url>', '<container>', '<path>', '<account name>', '<account key>')
+```
+
+```sql
+BACKUP TABLE data TO AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;',
+    'test_container', 'data_backup');
+RESTORE TABLE data AS data_restored FROM AzureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;',
+    'test_container', 'data_backup');
+```
@@ -31,6 +31,10 @@ This reduces maintenance effort and avoids redundancy.

## Configuration Settings and Usage

+:::note
+In ClickHouse Cloud, you must use [query level settings](/en/operations/settings/query-level) to edit query cache settings. Editing [config level settings](/en/operations/configuration-files) is currently not supported.
+:::
+
Setting [use_query_cache](settings/settings.md#use-query-cache) can be used to control whether a specific query or all queries of the
current session should utilize the query cache. For example, the first execution of query
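As a quick, hedged illustration of the query-level control mentioned in the note (the table name is hypothetical):

```sql
-- First execution computes the result and stores it in the query cache;
-- repeated executions can then be served from the cache.
SELECT count() FROM web_events SETTINGS use_query_cache = true;
```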
@@ -28,6 +28,8 @@ The maximum amount of RAM to use for running a query on a single server.

The default setting is unlimited (set to `0`).
+
+Cloud default value: depends on the amount of RAM on the replica.

The setting does not consider the volume of available memory or the total volume of memory on the machine.
The restriction applies to a single query within a single server.
You can use `SHOW PROCESSLIST` to see the current memory consumption for each query.
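The `SHOW PROCESSLIST` hint above translates directly to SQL; a hedged sketch (column names as exposed by recent `system.processes`):

```sql
SHOW PROCESSLIST;
-- or select just the memory figures:
SELECT query_id, memory_usage, peak_memory_usage FROM system.processes;
```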
@@ -104,7 +106,9 @@ Possible values:
- Maximum volume of RAM (in bytes) that can be used by the single [GROUP BY](../../sql-reference/statements/select/group-by.md#select-group-by-clause) operation.
- 0 — `GROUP BY` in external memory disabled.

-Default value: 0.
+Default value: `0`.
+
+Cloud default value: half the memory amount per replica.

## max_bytes_before_external_sort {#settings-max_bytes_before_external_sort}
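A hedged sketch of how this setting is typically paired with a memory cap (byte values illustrative, not recommendations from this page; the table and key are hypothetical):

```sql
-- Let GROUP BY spill to disk at ~10 GB so the query stays under the memory limit
SET max_bytes_before_external_group_by = 10000000000;
SET max_memory_usage = 20000000000;
SELECT key, count() FROM big_table GROUP BY key FORMAT Null;
```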
@@ -115,6 +119,8 @@ Enables or disables execution of `ORDER BY` clauses in external memory.

Default value: 0.
+
+Cloud default value: half the memory amount per replica.

## max_rows_to_sort {#max-rows-to-sort}

A maximum number of rows before sorting. This allows you to limit memory consumption when sorting.
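And the analogous sketch for sorting (again illustrative values; `big_table`/`key` are hypothetical):

```sql
-- Allow a large ORDER BY to spill to disk instead of failing on the memory limit
SET max_bytes_before_external_sort = 10000000000;
SELECT * FROM big_table ORDER BY key FORMAT Null;
```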
@@ -129,7 +135,11 @@ What to do if the number of rows received before sorting exceeds one of the limits

## max_result_rows {#setting-max_result_rows}

-Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query.
+Limit on the number of rows in the result. Also checked for subqueries, and on remote servers when running parts of a distributed query. No limit is applied when the value is `0`.
+
+Default value: `0`.
+
+Cloud default value: `0`.

## max_result_bytes {#max-result-bytes}
@@ -137,10 +147,14 @@ Limit on the number of bytes in the result. The same as the previous setting.

## result_overflow_mode {#result-overflow-mode}

-What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’. By default, throw.
+What to do if the volume of the result exceeds one of the limits: ‘throw’ or ‘break’.

Using ‘break’ is similar to using LIMIT. `Break` interrupts execution only at the block level. This means that amount of returned rows is greater than [max_result_rows](#setting-max_result_rows), multiple of [max_block_size](../../operations/settings/settings.md#setting-max_block_size) and depends on [max_threads](../../operations/settings/settings.md#max_threads).
+
+Default value: `throw`.
+
+Cloud default value: `throw`.

Example:

``` sql
@@ -508,7 +508,9 @@ Possible values:
- Any positive integer number of hops.
- 0 — No hops allowed.

-Default value: 0.
+Default value: `0`.
+
+Cloud default value: `10`.

## insert_null_as_default {#insert_null_as_default}
@@ -1126,7 +1128,9 @@ Possible values:
- 0 (or 1) — `INSERT SELECT` no parallel execution.
- Positive integer. Bigger than 1.

-Default value: 0.
+Default value: `0`.
+
+Cloud default value: from `2` to `4`, depending on the service size.

Parallel `INSERT SELECT` has effect only if the `SELECT` part is executed in parallel, see [max_threads](#max_threads) setting.
Higher values will lead to higher memory usage.
@@ -1207,7 +1211,9 @@ Default value: 10000.

Cancels HTTP read-only queries (e.g. SELECT) when a client closes the connection without waiting for the response.

-Default value: 0
+Default value: `0`.
+
+Cloud default value: `1`.

## poll_interval {#poll-interval}
@@ -1946,6 +1952,8 @@ Possible values:

Default value: `200`.
+
+Cloud default value: `1000`.

### async_insert_poll_timeout_ms {#async-insert-poll-timeout-ms}

Timeout in milliseconds for polling data from asynchronous insert queue.
@@ -2130,7 +2138,9 @@ Possible values:
- Positive integer.
- 0 — Retries are disabled

-Default value: 0
+Default value: 20
+
+Cloud default value: `20`.

Keeper request retries are done after some timeout. The timeout is controlled by the following settings: `insert_keeper_retry_initial_backoff_ms`, `insert_keeper_retry_max_backoff_ms`.
The first retry is done after `insert_keeper_retry_initial_backoff_ms` timeout. The consequent timeouts will be calculated as follows:
@@ -2660,6 +2670,8 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).

Default value: 1000000000 nanoseconds (once a second).
+
+**Temporarily disabled in ClickHouse Cloud.**

See also:

- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)
@@ -2683,6 +2695,8 @@ Type: [UInt64](../../sql-reference/data-types/int-uint.md).

Default value: 1000000000 nanoseconds.
+
+**Temporarily disabled in ClickHouse Cloud.**

See also:

- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)
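Both profiler periods are plain query-level settings; a hedged sketch of enabling them and then inspecting the samples via the `trace_log` table referenced above:

```sql
SET query_profiler_real_time_period_ns = 10000000; -- sample wall-clock time every 10 ms
SET query_profiler_cpu_time_period_ns  = 10000000; -- sample CPU time every 10 ms
-- After running a workload, inspect the collected samples:
SELECT trace_type, count() FROM system.trace_log GROUP BY trace_type;
```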
@@ -2804,6 +2818,8 @@ Possible values:

Default value: `0`.
+
+Cloud default value: `1`.

**See Also**

- [Distributed Table Engine](../../engines/table-engines/special/distributed.md/#distributed)
@@ -3319,7 +3335,9 @@ Possible values:

- a string representing any valid table engine name

-Default value: `None`
+Default value: `MergeTree`.
+
+Cloud default value: `SharedMergeTree`.

**Example**
@@ -3895,6 +3913,8 @@ Possible values:

Default value: `0`.
+
+Cloud default value: `1`.

## database_replicated_initial_query_timeout_sec {#database_replicated_initial_query_timeout_sec}

Sets how long initial DDL query should wait for Replicated database to process previous DDL queue entries in seconds.
@@ -3933,6 +3953,8 @@ Possible values:

Default value: `throw`.
+
+Cloud default value: `none`.

## flatten_nested {#flatten-nested}

Sets the data format of a [nested](../../sql-reference/data-types/nested-data-structures/index.md) columns.
@@ -4068,6 +4090,8 @@ Possible values:

Default value: `1`.
+
+Cloud default value: `0`.

:::note
`alter_sync` is applicable to `Replicated` tables only, it does nothing to alters of not `Replicated` tables.
:::
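A hedged sketch of the setting in use; the 0/1/2 semantics (don't wait / wait for own replica / wait for all replicas) come from the full settings reference rather than this excerpt, and the table name is hypothetical:

```sql
SET alter_sync = 2; -- wait until every replica has applied the ALTER
ALTER TABLE replicated_events MODIFY COLUMN payload String;
```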
@@ -4723,6 +4747,8 @@ other connections are cancelled. Queries with `max_parallel_replicas > 1` are supported.

Enabled by default.
+
+Disabled by default on Cloud.

## hedged_connection_timeout {#hedged_connection_timeout}

If we can't establish connection with replica after this timeout in hedged requests, we start working with the next replica without cancelling connection to the previous.
@@ -5348,10 +5374,11 @@ Default value: `false`.

## max_partition_size_to_drop

-Restriction on dropping partitions in query time.
+Restriction on dropping partitions in query time. The value 0 means that you can drop partitions without any restrictions.

Default value: 50 GB.
-The value 0 means that you can drop partitions without any restrictions.
+
+Cloud default value: 1 TB.

:::note
This query setting overwrites its server setting equivalent, see [max_partition_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-partition-size-to-drop)
|
|||||||
|
|
||||||
## max_table_size_to_drop
|
## max_table_size_to_drop
|
||||||
|
|
||||||
Restriction on deleting tables in query time.
|
Restriction on deleting tables in query time. The value 0 means that you can delete all tables without any restrictions.
|
||||||
|
|
||||||
Default value: 50 GB.
|
Default value: 50 GB.
|
||||||
The value 0 means that you can delete all tables without any restrictions.
|
|
||||||
|
Cloud default value: 1 TB.
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
|
This query setting overwrites its server setting equivalent, see [max_table_size_to_drop](/docs/en/operations/server-configuration-parameters/settings.md/#max-table-size-to-drop)
|
||||||
|
@@ -514,6 +514,10 @@ ENGINE = MergeTree ORDER BY x;

## Temporary Tables

+:::note
+Please note that temporary tables are not replicated. As a result, there is no guarantee that data inserted into a temporary table will be available in other replicas. The primary use case where temporary tables can be useful is for querying or joining small external datasets during a single session.
+:::
+
ClickHouse supports temporary tables which have the following characteristics:

- Temporary tables disappear when the session ends, including if the connection is lost.
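A sketch of the single-session pattern the note recommends (`events` is a hypothetical regular table):

```sql
CREATE TEMPORARY TABLE ids (id UInt64);  -- lives only for this session
INSERT INTO ids VALUES (1), (2), (3);
SELECT e.* FROM events AS e INNER JOIN ids USING (id);
```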
@@ -20,6 +20,6 @@ sidebar_position: 11

If you haven't seen our t-shirts, have a look at a ClickHouse video. For example, this one:

-![iframe](https://www.youtube.com/embed/bSyQahMVZ7w)
+<iframe width="675" height="380" src="https://www.youtube.com/embed/bSyQahMVZ7w" frameborder="0" allow="accelerometer; autoplay; gyroscope; picture-in-picture" allowfullscreen></iframe>

P.S. These t-shirts are not for sale; they are given away for free at most [ClickHouse](https://clickhouse.com/#meet) meetups, usually as a reward for the most interesting questions or other forms of active participation.
@@ -130,6 +130,8 @@ if [ -n "$SANITIZER" ]; then
    fi
elif [[ $BUILD_TYPE == 'debug' ]]; then
    VERSION_POSTFIX+="+debug"
+elif [[ $BUILD_TYPE =~ 'coverage' ]]; then
+    VERSION_POSTFIX+="+coverage"
fi

if [[ "$PKG_ROOT" != "$SOURCE" ]]; then
@@ -49,6 +49,12 @@ contents:
  dst: /usr/bin/clickhouse-client
- src: root/usr/bin/clickhouse-local
  dst: /usr/bin/clickhouse-local
+- src: root/usr/bin/ch
+  dst: /usr/bin/ch
+- src: root/usr/bin/chc
+  dst: /usr/bin/chc
+- src: root/usr/bin/chl
+  dst: /usr/bin/chl
- src: root/usr/bin/clickhouse-obfuscator
  dst: /usr/bin/clickhouse-obfuscator
# docs
@@ -7,35 +7,16 @@ endif ()
include(${ClickHouse_SOURCE_DIR}/cmake/split_debug_symbols.cmake)

# The `clickhouse` binary is a multi purpose tool that contains multiple execution modes (client, server, etc.),
-# each of them may be built and linked as a separate library.
-# If you do not know what modes you need, turn this option OFF and enable SERVER and CLIENT only.
+# so client/server/... are just symlinks to the `clickhouse` binary.
+#
+# But there are several components that require extra libraries; e.g. keeper
+# requires NuRaft, which the regular binary does not, so you can disable
+# compilation of those components.
+#
+# If you do not know which modes you need, enable them all.
option (ENABLE_CLICKHOUSE_ALL "Enable all ClickHouse modes by default" ON)

-option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_ALL})
-option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)"
-    ${ENABLE_CLICKHOUSE_ALL})
-
-# https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/
-option (ENABLE_CLICKHOUSE_LOCAL "Local files fast processing mode" ${ENABLE_CLICKHOUSE_ALL})
-
-# https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark/
-option (ENABLE_CLICKHOUSE_BENCHMARK "Queries benchmarking mode" ${ENABLE_CLICKHOUSE_ALL})
-
-option (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG "Configs processor (extract values etc.)" ${ENABLE_CLICKHOUSE_ALL})
-
-# https://clickhouse.com/docs/en/operations/utilities/clickhouse-compressor/
-option (ENABLE_CLICKHOUSE_COMPRESSOR "Data compressor and decompressor" ${ENABLE_CLICKHOUSE_ALL})
-
-# https://clickhouse.com/docs/en/operations/utilities/clickhouse-copier/
-option (ENABLE_CLICKHOUSE_COPIER "Inter-cluster data copying mode" ${ENABLE_CLICKHOUSE_ALL})
-
-option (ENABLE_CLICKHOUSE_FORMAT "Queries pretty-printer and formatter with syntax highlighting"
-    ${ENABLE_CLICKHOUSE_ALL})
-
# https://clickhouse.com/docs/en/operations/utilities/clickhouse-obfuscator/
-option (ENABLE_CLICKHOUSE_OBFUSCATOR "Table data obfuscator (convert real data to benchmark-ready one)"
-    ${ENABLE_CLICKHOUSE_ALL})

# https://clickhouse.com/docs/en/operations/utilities/odbc-bridge/
# TODO Also needs NANODBC.
if (ENABLE_ODBC AND NOT USE_MUSL)
@@ -51,18 +32,12 @@ endif ()
# https://presentations.clickhouse.com/matemarketing_2020/
option (ENABLE_CLICKHOUSE_GIT_IMPORT "A tool to analyze Git repositories" ${ENABLE_CLICKHOUSE_ALL})

-option (ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER "A tool to export table data files to be later put to a static files web server" ${ENABLE_CLICKHOUSE_ALL})
-
option (ENABLE_CLICKHOUSE_KEEPER "ClickHouse alternative to ZooKeeper" ${ENABLE_CLICKHOUSE_ALL})

option (ENABLE_CLICKHOUSE_KEEPER_CONVERTER "Util allows to convert ZooKeeper logs and snapshots into clickhouse-keeper snapshot" ${ENABLE_CLICKHOUSE_ALL})

option (ENABLE_CLICKHOUSE_KEEPER_CLIENT "ClickHouse Keeper Client" ${ENABLE_CLICKHOUSE_ALL})

-option (ENABLE_CLICKHOUSE_SU "A tool similar to 'su'" ${ENABLE_CLICKHOUSE_ALL})
-
-option (ENABLE_CLICKHOUSE_DISKS "A tool to manage disks" ${ENABLE_CLICKHOUSE_ALL})
-
if (NOT ENABLE_NURAFT)
    # RECONFIGURE_MESSAGE_LEVEL should not be used here,
    # since ENABLE_NURAFT is set to OFF for FreeBSD and Darwin.
@@ -71,27 +46,7 @@ if (NOT ENABLE_NURAFT)
    set(ENABLE_CLICKHOUSE_KEEPER_CONVERTER OFF)
endif()

-option(ENABLE_CLICKHOUSE_INSTALL "Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only)" ${ENABLE_CLICKHOUSE_ALL})
-
-message(STATUS "ClickHouse modes:")
-
-if (NOT ENABLE_CLICKHOUSE_SERVER)
-    message(WARNING "ClickHouse server mode is not going to be built.")
-else()
-    message(STATUS "Server mode: ON")
-endif()
-
-if (NOT ENABLE_CLICKHOUSE_CLIENT)
-    message(WARNING "ClickHouse client mode is not going to be built. You won't be able to connect to the server and run tests")
-else()
-    message(STATUS "Client mode: ON")
-endif()
-
-if (ENABLE_CLICKHOUSE_LOCAL)
-    message(STATUS "Local mode: ON")
-else()
-    message(STATUS "Local mode: OFF")
-endif()
+message(STATUS "ClickHouse extra components:")

if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
    message(STATUS "Self-extracting executable: ON")
@@ -99,42 +54,6 @@ else()
    message(STATUS "Self-extracting executable: OFF")
endif()

-if (ENABLE_CLICKHOUSE_BENCHMARK)
-    message(STATUS "Benchmark mode: ON")
-else()
-    message(STATUS "Benchmark mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
-    message(STATUS "Extract from config mode: ON")
-else()
-    message(STATUS "Extract from config mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_COMPRESSOR)
-    message(STATUS "Compressor mode: ON")
-else()
-    message(STATUS "Compressor mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_COPIER)
-    message(STATUS "Copier mode: ON")
-else()
-    message(STATUS "Copier mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_FORMAT)
-    message(STATUS "Format mode: ON")
-else()
-    message(STATUS "Format mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_OBFUSCATOR)
-    message(STATUS "Obfuscator mode: ON")
-else()
-    message(STATUS "Obfuscator mode: OFF")
-endif()
-
if (ENABLE_CLICKHOUSE_ODBC_BRIDGE)
    message(STATUS "ODBC bridge mode: ON")
else()
@@ -147,18 +66,6 @@ else()
    message(STATUS "Library bridge mode: OFF")
endif()

-if (ENABLE_CLICKHOUSE_INSTALL)
-    message(STATUS "ClickHouse install: ON")
-else()
-    message(STATUS "ClickHouse install: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_GIT_IMPORT)
-    message(STATUS "ClickHouse git-import: ON")
-else()
-    message(STATUS "ClickHouse git-import: OFF")
-endif()
-
if (ENABLE_CLICKHOUSE_KEEPER)
    message(STATUS "ClickHouse keeper mode: ON")
else()
@@ -177,19 +84,6 @@ else()
    message(STATUS "ClickHouse keeper-client mode: OFF")
endif()

-if (ENABLE_CLICKHOUSE_DISKS)
-    message(STATUS "Clickhouse disks mode: ON")
-else()
-    message(STATUS "ClickHouse disks mode: OFF")
-endif()
-
-if (ENABLE_CLICKHOUSE_SU)
-    message(STATUS "ClickHouse su: ON")
-else()
-    message(STATUS "ClickHouse su: OFF")
-endif()
-
configure_file (config_tools.h.in ${CONFIG_INCLUDE_PATH}/config_tools.h)

macro(clickhouse_target_link_split_lib target name)
@@ -272,42 +166,6 @@ endif ()
target_link_libraries (clickhouse PRIVATE clickhouse_common_io string_utils ${HARMFUL_LIB})
target_include_directories (clickhouse PRIVATE ${CMAKE_CURRENT_BINARY_DIR})

-if (ENABLE_CLICKHOUSE_SERVER)
-    clickhouse_target_link_split_lib(clickhouse server)
-endif ()
-if (ENABLE_CLICKHOUSE_CLIENT)
-    clickhouse_target_link_split_lib(clickhouse client)
-endif ()
-if (ENABLE_CLICKHOUSE_LOCAL)
-    clickhouse_target_link_split_lib(clickhouse local)
-endif ()
-if (ENABLE_CLICKHOUSE_BENCHMARK)
-    clickhouse_target_link_split_lib(clickhouse benchmark)
-endif ()
-if (ENABLE_CLICKHOUSE_COPIER)
-    clickhouse_target_link_split_lib(clickhouse copier)
-endif ()
-if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
-    clickhouse_target_link_split_lib(clickhouse extract-from-config)
-endif ()
-if (ENABLE_CLICKHOUSE_COMPRESSOR)
-    clickhouse_target_link_split_lib(clickhouse compressor)
-endif ()
-if (ENABLE_CLICKHOUSE_FORMAT)
-    clickhouse_target_link_split_lib(clickhouse format)
-endif ()
-if (ENABLE_CLICKHOUSE_OBFUSCATOR)
-    clickhouse_target_link_split_lib(clickhouse obfuscator)
-endif ()
-if (ENABLE_CLICKHOUSE_GIT_IMPORT)
-    clickhouse_target_link_split_lib(clickhouse git-import)
-endif ()
-if (ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER)
-    clickhouse_target_link_split_lib(clickhouse static-files-disk-uploader)
-endif ()
-if (ENABLE_CLICKHOUSE_SU)
-    clickhouse_target_link_split_lib(clickhouse su)
-endif ()
-
if (ENABLE_CLICKHOUSE_KEEPER)
    clickhouse_target_link_split_lib(clickhouse keeper)
endif()
@@ -317,77 +175,40 @@ endif()
if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
    clickhouse_target_link_split_lib(clickhouse keeper-client)
endif()
-if (ENABLE_CLICKHOUSE_INSTALL)
-    clickhouse_target_link_split_lib(clickhouse install)
-endif ()
-if (ENABLE_CLICKHOUSE_DISKS)
-    clickhouse_target_link_split_lib(clickhouse disks)
-endif ()
+clickhouse_target_link_split_lib(clickhouse install)

set (CLICKHOUSE_BUNDLE)
+
+macro(clickhouse_program_install name lib_name)
+    clickhouse_target_link_split_lib(clickhouse ${lib_name})
+    add_custom_target (${name} ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse ${name} DEPENDS clickhouse)
+    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/${name}" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+    list(APPEND CLICKHOUSE_BUNDLE ${name})
+
+    foreach(alias ${ARGN})
+        message(STATUS "Adding alias ${alias} for ${name}")
+        add_custom_target (${alias} ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse ${alias} DEPENDS clickhouse)
+        install (FILES "${CMAKE_CURRENT_BINARY_DIR}/${alias}" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
+        list(APPEND CLICKHOUSE_BUNDLE ${alias})
+    endforeach()
+endmacro()
+
if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
    list(APPEND CLICKHOUSE_BUNDLE self-extracting)
endif ()
-if (ENABLE_CLICKHOUSE_SERVER)
-    add_custom_target (clickhouse-server ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-server DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-server" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-server)
-endif ()
-if (ENABLE_CLICKHOUSE_CLIENT)
-    add_custom_target (clickhouse-client ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-client DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-client" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-client)
-endif ()
-if (ENABLE_CLICKHOUSE_LOCAL)
-    add_custom_target (clickhouse-local ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-local DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-local" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-local)
-endif ()
-if (ENABLE_CLICKHOUSE_BENCHMARK)
-    add_custom_target (clickhouse-benchmark ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-benchmark DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-benchmark" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-benchmark)
-endif ()
-if (ENABLE_CLICKHOUSE_COPIER)
-    add_custom_target (clickhouse-copier ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-copier DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-copier" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-copier)
-endif ()
-if (ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG)
-    add_custom_target (clickhouse-extract-from-config ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-extract-from-config DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-extract-from-config" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-extract-from-config)
-endif ()
-if (ENABLE_CLICKHOUSE_COMPRESSOR)
-    add_custom_target (clickhouse-compressor ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-compressor DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-compressor" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-compressor)
-endif ()
-if (ENABLE_CLICKHOUSE_FORMAT)
-    add_custom_target (clickhouse-format ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-format DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-format" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-format)
-endif ()
-if (ENABLE_CLICKHOUSE_OBFUSCATOR)
-    add_custom_target (clickhouse-obfuscator ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-obfuscator DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-obfuscator" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-obfuscator)
-endif ()
-if (ENABLE_CLICKHOUSE_GIT_IMPORT)
-    add_custom_target (clickhouse-git-import ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-git-import DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-git-import" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-git-import)
-endif ()
-if (ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER)
-    add_custom_target (clickhouse-static-files-disk-uploader ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-static-files-disk-uploader DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-static-files-disk-uploader" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-static-files-disk-uploader)
-endif ()
-if (ENABLE_CLICKHOUSE_SU)
-    add_custom_target (clickhouse-su ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-su DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-su" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-su)
-endif ()
+clickhouse_program_install(clickhouse-server server)
+clickhouse_program_install(clickhouse-client client chc)
+clickhouse_program_install(clickhouse-local local chl ch)
+clickhouse_program_install(clickhouse-benchmark benchmark)
+clickhouse_program_install(clickhouse-copier copier)
+clickhouse_program_install(clickhouse-extract-from-config extract-from-config)
+clickhouse_program_install(clickhouse-compressor compressor)
+clickhouse_program_install(clickhouse-format format)
+clickhouse_program_install(clickhouse-obfuscator obfuscator)
+clickhouse_program_install(clickhouse-git-import git-import)
+clickhouse_program_install(clickhouse-static-files-disk-uploader static-files-disk-uploader)
+clickhouse_program_install(clickhouse-disks disks)
+clickhouse_program_install(clickhouse-su su)

if (ENABLE_CLICKHOUSE_KEEPER)
    if (NOT BUILD_STANDALONE_KEEPER AND CREATE_KEEPER_SYMLINK)
@@ -417,11 +238,6 @@ if (ENABLE_CLICKHOUSE_KEEPER_CLIENT)
    list(APPEND CLICKHOUSE_BUNDLE clickhouse-keeper-client)
endif ()
-if (ENABLE_CLICKHOUSE_DISKS)
-    add_custom_target (clickhouse-disks ALL COMMAND ${CMAKE_COMMAND} -E create_symlink clickhouse clickhouse-disks DEPENDS clickhouse)
-    install (FILES "${CMAKE_CURRENT_BINARY_DIR}/clickhouse-disks" DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT clickhouse)
-    list(APPEND CLICKHOUSE_BUNDLE clickhouse-disks)
-endif ()

add_custom_target (clickhouse-bundle ALL DEPENDS ${CLICKHOUSE_BUNDLE})
@@ -640,7 +640,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
    {
        std::cout << "Usage: " << argv[0] << " [options] < queries.txt\n";
        std::cout << desc << "\n";
-        return 1;
+        std::cout << "\nSee also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-benchmark/\n";
+        return 0;
    }

    print_stacktrace = options.count("stacktrace");
@@ -1000,6 +1000,7 @@ void Client::printHelpMessage(const OptionsDescription & options_description)
    std::cout << options_description.external_description.value() << "\n";
    std::cout << options_description.hosts_and_ports_description.value() << "\n";
    std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
+    std::cout << "\nSee also: https://clickhouse.com/docs/en/integrations/sql-clients/cli\n";
}
@@ -100,6 +100,7 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
        std::cout << "Usage: " << argv[0] << " [options] < INPUT > OUTPUT" << std::endl;
        std::cout << "Usage: " << argv[0] << " [options] INPUT OUTPUT" << std::endl;
        std::cout << desc << std::endl;
+        std::cout << "\nSee also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-compressor/\n";
        return 0;
    }
@@ -2,23 +2,8 @@

#pragma once

-#cmakedefine01 ENABLE_CLICKHOUSE_SERVER
-#cmakedefine01 ENABLE_CLICKHOUSE_CLIENT
-#cmakedefine01 ENABLE_CLICKHOUSE_LOCAL
-#cmakedefine01 ENABLE_CLICKHOUSE_BENCHMARK
-#cmakedefine01 ENABLE_CLICKHOUSE_PERFORMANCE_TEST
-#cmakedefine01 ENABLE_CLICKHOUSE_COPIER
-#cmakedefine01 ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
-#cmakedefine01 ENABLE_CLICKHOUSE_COMPRESSOR
-#cmakedefine01 ENABLE_CLICKHOUSE_FORMAT
-#cmakedefine01 ENABLE_CLICKHOUSE_OBFUSCATOR
-#cmakedefine01 ENABLE_CLICKHOUSE_GIT_IMPORT
-#cmakedefine01 ENABLE_CLICKHOUSE_INSTALL
#cmakedefine01 ENABLE_CLICKHOUSE_ODBC_BRIDGE
#cmakedefine01 ENABLE_CLICKHOUSE_LIBRARY_BRIDGE
#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER
#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER_CLIENT
#cmakedefine01 ENABLE_CLICKHOUSE_KEEPER_CONVERTER
-#cmakedefine01 ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER
-#cmakedefine01 ENABLE_CLICKHOUSE_SU
-#cmakedefine01 ENABLE_CLICKHOUSE_DISKS
@@ -78,6 +78,7 @@ void ClusterCopierApp::handleHelp(const std::string &, const std::string &)
    help_formatter.setHeader("Copies tables from one cluster to another");
    help_formatter.setUsage("--config-file <config-file> --task-path <task-path>");
    help_formatter.format(std::cerr);
+    help_formatter.setFooter("See also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-copier/");

    stopOptionsProcessing();
}
@@ -11,6 +11,10 @@ set (CLICKHOUSE_DISKS_SOURCES
    CommandRemove.cpp
    CommandWrite.cpp)

+if (CLICKHOUSE_CLOUD)
+    set (CLICKHOUSE_DISKS_SOURCES ${CLICKHOUSE_DISKS_SOURCES} CommandPackedIO.cpp)
+endif ()
+
set (CLICKHOUSE_DISKS_LINK
    PRIVATE
    boost::program_options
@@ -61,7 +61,6 @@ public:
        auto out = disk->writeFile(relative_path_output);
        copyData(*in, *out);
        out->finalize();
-        return;
    }
    else
    {
@@ -65,6 +65,9 @@ void DisksApp::addOptions(
    positional_options_description.add("command_name", 1);

    supported_commands = {"list-disks", "list", "move", "remove", "link", "copy", "write", "read", "mkdir"};
+#ifdef CLICKHOUSE_CLOUD
+    supported_commands.insert("packed-io");
+#endif

    command_descriptions.emplace("list-disks", makeCommandListDisks());
    command_descriptions.emplace("list", makeCommandList());
|
|||||||
command_descriptions.emplace("write", makeCommandWrite());
|
command_descriptions.emplace("write", makeCommandWrite());
|
||||||
command_descriptions.emplace("read", makeCommandRead());
|
command_descriptions.emplace("read", makeCommandRead());
|
||||||
command_descriptions.emplace("mkdir", makeCommandMkDir());
|
command_descriptions.emplace("mkdir", makeCommandMkDir());
|
||||||
|
#ifdef CLICKHOUSE_CLOUD
|
||||||
|
command_descriptions.emplace("packed-io", makeCommandPackedIO());
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
void DisksApp::processOptions()
|
void DisksApp::processOptions()
|
||||||
@@ -89,6 +95,11 @@ void DisksApp::processOptions()
        config().setString("log-level", options["log-level"].as<String>());
}

+DisksApp::~DisksApp()
+{
+    global_context->shutdown();
+}
+
void DisksApp::init(std::vector<String> & common_arguments)
{
    stopOptionsProcessing();
@@ -134,6 +145,7 @@ void DisksApp::parseAndCheckOptions(
        .options(options_description_)
        .positional(positional_options_description)
        .allow_unregistered();
+
    po::parsed_options parsed = parser.run();
    po::store(parsed, options);
|
|||||||
po::parsed_options parsed = parser.run();
|
po::parsed_options parsed = parser.run();
|
||||||
po::store(parsed, options);
|
po::store(parsed, options);
|
||||||
po::notify(options);
|
po::notify(options);
|
||||||
args = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::include_positional);
|
|
||||||
|
|
||||||
|
args = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::include_positional);
|
||||||
command->processOptions(config(), options);
|
command->processOptions(config(), options);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@@ -21,6 +21,7 @@ class DisksApp : public Poco::Util::Application, public Loggers
 {
 public:
     DisksApp() = default;
+    ~DisksApp() override;

     void init(std::vector<String> & common_arguments);

@@ -52,9 +53,9 @@ protected:
     std::vector<String> command_arguments;

     std::unordered_set<String> supported_commands;

     std::unordered_map<String, CommandPtr> command_descriptions;

     po::variables_map options;
 };

 }
@@ -63,3 +63,4 @@ DB::CommandPtr makeCommandRead();
 DB::CommandPtr makeCommandRemove();
 DB::CommandPtr makeCommandWrite();
 DB::CommandPtr makeCommandMkDir();
+DB::CommandPtr makeCommandPackedIO();
@@ -172,6 +172,7 @@ clickhouse-client --query "INSERT INTO git.commits FORMAT TSV" < commits.tsv
 clickhouse-client --query "INSERT INTO git.file_changes FORMAT TSV" < file_changes.tsv
 clickhouse-client --query "INSERT INTO git.line_changes FORMAT TSV" < line_changes.tsv

+Check out this presentation: https://presentations.clickhouse.com/matemarketing_2020/
 )";

 namespace po = boost::program_options;
@@ -79,10 +79,6 @@ namespace ErrorCodes

 }

-/// ANSI escape sequence for intense color in terminal.
-#define HILITE "\033[1m"
-#define END_HILITE "\033[0m"
-
 #if defined(OS_DARWIN)
 /// Until createUser() and createGroup() are implemented, only sudo-less installations are supported/default for macOS.
 static constexpr auto DEFAULT_CLICKHOUSE_SERVER_USER = "";
@@ -216,6 +212,16 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
 {
     try
     {
+        const char * start_hilite = "";
+        const char * end_hilite = "";
+
+        if (isatty(STDOUT_FILENO))
+        {
+            /// ANSI escape sequence for intense color in terminal.
+            start_hilite = "\033[1m";
+            end_hilite = "\033[0m";
+        }
+
         po::options_description desc;
         desc.add_options()
             ("help,h", "produce help message")
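The hunk above replaces the compile-time HILITE/END_HILITE macros with runtime detection, so the bold escape codes are emitted only when stdout is actually a terminal and never leak into pipes or log files. A minimal standalone sketch of the same pattern (illustrative only, not the ClickHouse sources):

#include <cstdio>
#include <unistd.h>

int main()
{
    const char * start_hilite = "";
    const char * end_hilite = "";
    if (isatty(STDOUT_FILENO))   /// highlight only when writing to an interactive terminal
    {
        start_hilite = "\033[1m";
        end_hilite = "\033[0m";
    }
    std::printf("%sThis line is bold only on a TTY.%s\n", start_hilite, end_hilite);
    return 0;
}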
@@ -236,9 +242,10 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

         if (options.count("help"))
         {
+            std::cout << "Install ClickHouse without .deb/.rpm/.tgz packages (having the binary only)\n\n";
             std::cout << "Usage: " << formatWithSudo(std::string(argv[0]) + " install [options]", getuid() != 0) << '\n';
             std::cout << desc << '\n';
-            return 1;
+            return 0;
         }

         /// We need to copy binary to the binary directory.
@@ -707,7 +714,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
         {
             fmt::print("Users config file {} already exists, will keep it and extract users info from it.\n", users_config_file.string());

-            /// Check if password for default user already specified.
+            /// Check if password for the default user already specified.
             ConfigProcessor processor(users_config_file.string(), /* throw_on_bad_incl = */ false, /* log_to_console = */ false);
             ConfigurationPtr configuration(new Poco::Util::XMLConfiguration(processor.processConfig()));

@@ -799,13 +806,13 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
         /// Set up password for default user.
         if (has_password_for_default_user)
         {
-            fmt::print(HILITE "Password for default user is already specified. To remind or reset, see {} and {}." END_HILITE "\n",
-                users_config_file.string(), users_d.string());
+            fmt::print("{}Password for the default user is already specified. To remind or reset, see {} and {}.{}\n",
+                start_hilite, users_config_file.string(), users_d.string(), end_hilite);
         }
         else if (!can_ask_password)
         {
-            fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
-                users_config_file.string(), users_d.string());
+            fmt::print("{}Password for the default user is an empty string. See {} and {} to change it.{}\n",
+                start_hilite, users_config_file.string(), users_d.string(), end_hilite);
         }
         else
         {
@@ -814,7 +821,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

             char buf[1000] = {};
             std::string password;
-            if (auto * result = readpassphrase("Enter password for default user: ", buf, sizeof(buf), 0))
+            if (auto * result = readpassphrase("Enter password for the default user: ", buf, sizeof(buf), 0))
                 password = result;

             if (!password.empty())
@@ -839,7 +846,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                     "</clickhouse>\n";
                 out.sync();
                 out.finalize();
-                fmt::print(HILITE "Password for default user is saved in file {}." END_HILITE "\n", password_file);
+                fmt::print("{}Password for the default user is saved in file {}.{}\n", start_hilite, password_file, end_hilite);
 #else
                 out << "<clickhouse>\n"
                     "    <users>\n"
@@ -850,13 +857,13 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
                     "</clickhouse>\n";
                 out.sync();
                 out.finalize();
-                fmt::print(HILITE "Password for default user is saved in plaintext in file {}." END_HILITE "\n", password_file);
+                fmt::print("{}Password for the default user is saved in plaintext in file {}.{}\n", start_hilite, password_file, end_hilite);
 #endif
                 has_password_for_default_user = true;
             }
             else
-                fmt::print(HILITE "Password for default user is empty string. See {} and {} to change it." END_HILITE "\n",
-                    users_config_file.string(), users_d.string());
+                fmt::print("{}Password for the default user is an empty string. See {} and {} to change it.{}\n",
+                    start_hilite, users_config_file.string(), users_d.string(), end_hilite);
         }

         /** Set capabilities for the binary.
@@ -1,5 +1,4 @@
 #include <iostream>
-#include <optional>
 #include <boost/program_options.hpp>

 #include <Coordination/KeeperSnapshotManager.h>
@@ -828,6 +828,7 @@ void LocalServer::printHelpMessage([[maybe_unused]] const OptionsDescription & options_description)
     std::cout << options_description.main_description.value() << "\n";
     std::cout << getHelpFooter() << "\n";
     std::cout << "In addition, --param_name=value can be specified for substitution of parameters for parametrized queries.\n";
+    std::cout << "\nSee also: https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/\n";
 #endif
 }

@@ -1,6 +1,7 @@
 #include <csignal>
 #include <csetjmp>
 #include <unistd.h>
+#include <fcntl.h>

 #include <new>
 #include <iostream>
@@ -19,39 +20,32 @@
 #include <Common/IO.h>

 #include <base/phdr_cache.h>
+#include <base/coverage.h>


 /// Universal executable for various clickhouse applications
-#if ENABLE_CLICKHOUSE_SERVER
 int mainEntryClickHouseServer(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_CLIENT
 int mainEntryClickHouseClient(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_LOCAL
 int mainEntryClickHouseLocal(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_BENCHMARK
 int mainEntryClickHouseBenchmark(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
 int mainEntryClickHouseExtractFromConfig(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_COMPRESSOR
 int mainEntryClickHouseCompressor(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_FORMAT
 int mainEntryClickHouseFormat(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_COPIER
 int mainEntryClickHouseClusterCopier(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_OBFUSCATOR
 int mainEntryClickHouseObfuscator(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_GIT_IMPORT
 int mainEntryClickHouseGitImport(int argc, char ** argv);
-#endif
+int mainEntryClickHouseStaticFilesDiskUploader(int argc, char ** argv);
+int mainEntryClickHouseSU(int argc, char ** argv);
+int mainEntryClickHouseDisks(int argc, char ** argv);
+
+int mainEntryClickHouseHashBinary(int, char **)
+{
+    /// Intentionally without newline. So you can run:
+    /// objcopy --add-section .clickhouse.hash=<(./clickhouse hash-binary) clickhouse
+    std::cout << getHashOfLoadedBinaryHex();
+    return 0;
+}
+
 #if ENABLE_CLICKHOUSE_KEEPER
 int mainEntryClickHouseKeeper(int argc, char ** argv);
 #endif
@@ -61,30 +55,13 @@ int mainEntryClickHouseKeeperConverter(int argc, char ** argv);
 #if ENABLE_CLICKHOUSE_KEEPER_CLIENT
 int mainEntryClickHouseKeeperClient(int argc, char ** argv);
 #endif
-#if ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER
-int mainEntryClickHouseStaticFilesDiskUploader(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_SU
-int mainEntryClickHouseSU(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_INSTALL
+
+// install
 int mainEntryClickHouseInstall(int argc, char ** argv);
 int mainEntryClickHouseStart(int argc, char ** argv);
 int mainEntryClickHouseStop(int argc, char ** argv);
 int mainEntryClickHouseStatus(int argc, char ** argv);
 int mainEntryClickHouseRestart(int argc, char ** argv);
-#endif
-#if ENABLE_CLICKHOUSE_DISKS
-int mainEntryClickHouseDisks(int argc, char ** argv);
-#endif
-
-int mainEntryClickHouseHashBinary(int, char **)
-{
-    /// Intentionally without newline. So you can run:
-    /// objcopy --add-section .clickhouse.hash=<(./clickhouse hash-binary) clickhouse
-    std::cout << getHashOfLoadedBinaryHex();
-    return 0;
-}

 namespace
 {
@@ -96,36 +73,22 @@ using MainFunc = int (*)(int, char**);
 /// Add an item here to register new application
 std::pair<std::string_view, MainFunc> clickhouse_applications[] =
 {
-#if ENABLE_CLICKHOUSE_LOCAL
     {"local", mainEntryClickHouseLocal},
-#endif
-#if ENABLE_CLICKHOUSE_CLIENT
     {"client", mainEntryClickHouseClient},
-#endif
-#if ENABLE_CLICKHOUSE_BENCHMARK
     {"benchmark", mainEntryClickHouseBenchmark},
-#endif
-#if ENABLE_CLICKHOUSE_SERVER
     {"server", mainEntryClickHouseServer},
-#endif
-#if ENABLE_CLICKHOUSE_EXTRACT_FROM_CONFIG
     {"extract-from-config", mainEntryClickHouseExtractFromConfig},
-#endif
-#if ENABLE_CLICKHOUSE_COMPRESSOR
     {"compressor", mainEntryClickHouseCompressor},
-#endif
-#if ENABLE_CLICKHOUSE_FORMAT
     {"format", mainEntryClickHouseFormat},
-#endif
-#if ENABLE_CLICKHOUSE_COPIER
     {"copier", mainEntryClickHouseClusterCopier},
-#endif
-#if ENABLE_CLICKHOUSE_OBFUSCATOR
     {"obfuscator", mainEntryClickHouseObfuscator},
-#endif
-#if ENABLE_CLICKHOUSE_GIT_IMPORT
     {"git-import", mainEntryClickHouseGitImport},
-#endif
+    {"static-files-disk-uploader", mainEntryClickHouseStaticFilesDiskUploader},
+    {"su", mainEntryClickHouseSU},
+    {"hash-binary", mainEntryClickHouseHashBinary},
+    {"disks", mainEntryClickHouseDisks},
+
+    // keeper
 #if ENABLE_CLICKHOUSE_KEEPER
     {"keeper", mainEntryClickHouseKeeper},
 #endif
@@ -135,34 +98,20 @@ std::pair<std::string_view, MainFunc> clickhouse_applications[] =
 #if ENABLE_CLICKHOUSE_KEEPER_CLIENT
     {"keeper-client", mainEntryClickHouseKeeperClient},
 #endif
-#if ENABLE_CLICKHOUSE_INSTALL
+
+    // install
     {"install", mainEntryClickHouseInstall},
     {"start", mainEntryClickHouseStart},
     {"stop", mainEntryClickHouseStop},
     {"status", mainEntryClickHouseStatus},
     {"restart", mainEntryClickHouseRestart},
-#endif
-#if ENABLE_CLICKHOUSE_STATIC_FILES_DISK_UPLOADER
-    {"static-files-disk-uploader", mainEntryClickHouseStaticFilesDiskUploader},
-#endif
-#if ENABLE_CLICKHOUSE_SU
-    {"su", mainEntryClickHouseSU},
-#endif
-    {"hash-binary", mainEntryClickHouseHashBinary},
-#if ENABLE_CLICKHOUSE_DISKS
-    {"disks", mainEntryClickHouseDisks},
-#endif
 };

 /// Add an item here to register a new short name
 std::pair<std::string_view, std::string_view> clickhouse_short_names[] =
 {
-#if ENABLE_CLICKHOUSE_LOCAL
     {"chl", "local"},
-#endif
-#if ENABLE_CLICKHOUSE_CLIENT
     {"chc", "client"},
-#endif
 };

 int printHelp(int, char **)
@@ -392,6 +341,50 @@ void checkHarmfulEnvironmentVariables(char ** argv)
 }
 #endif

+
+#if defined(SANITIZE_COVERAGE)
+__attribute__((no_sanitize("coverage")))
+void dumpCoverage()
+{
+    /// A user can request to dump the coverage information into files at exit.
+    /// This is useful for non-server applications such as clickhouse-format or clickhouse-client,
+    /// that cannot introspect it with SQL functions at runtime.
+
+    /// The CLICKHOUSE_WRITE_COVERAGE environment variable defines a prefix for a filename 'prefix.pid'
+    /// containing the list of addresses of covered code.
+
+    /// The format is even simpler than Clang's "sancov": an array of 64-bit addresses, native byte order, no header.
+
+    if (const char * coverage_filename_prefix = getenv("CLICKHOUSE_WRITE_COVERAGE")) // NOLINT(concurrency-mt-unsafe)
+    {
+        auto dump = [](const std::string & name, auto span)
+        {
+            /// Write only non-zeros.
+            std::vector<uintptr_t> data;
+            data.reserve(span.size());
+            for (auto addr : span)
+                if (addr)
+                    data.push_back(addr);
+
+            int fd = ::open(name.c_str(), O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0400);
+            if (-1 == fd)
+            {
+                writeError("Cannot open a file to write the coverage data\n");
+            }
+            else
+            {
+                if (!writeRetry(fd, reinterpret_cast<const char *>(data.data()), data.size() * sizeof(data[0])))
+                    writeError("Cannot write the coverage data to a file\n");
+                if (0 != ::close(fd))
+                    writeError("Cannot close the file with coverage data\n");
+            }
+        };
+
+        dump(fmt::format("{}.{}", coverage_filename_prefix, getpid()), getCumulativeCoverage());
+    }
+}
+#endif

 }

 bool isClickhouseApp(std::string_view app_suffix, std::vector<char *> & argv)
@@ -512,6 +505,12 @@ int main(int argc_, char ** argv_)
     if (main_func == printHelp && !argv.empty() && (argv.size() == 1 || argv[1][0] == '-'))
         main_func = mainEntryClickHouseLocal;

-    return main_func(static_cast<int>(argv.size()), argv.data());
+    int exit_code = main_func(static_cast<int>(argv.size()), argv.data());
+
+#if defined(SANITIZE_COVERAGE)
+    dumpCoverage();
+#endif
+
+    return exit_code;
 }
 #endif
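The coverage dump format introduced above is deliberately trivial: a raw array of 64-bit addresses in native byte order with no header. A hypothetical standalone reader (the file handling and output here are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <fstream>
#include <vector>

int main(int argc, char ** argv)
{
    if (argc != 2)
    {
        std::fprintf(stderr, "usage: %s <coverage-dump-file>\n", argv[0]);
        return 1;
    }
    std::ifstream in(argv[1], std::ios::binary | std::ios::ate);
    if (!in)
    {
        std::fprintf(stderr, "cannot open %s\n", argv[1]);
        return 1;
    }
    const auto bytes = static_cast<size_t>(in.tellg());
    std::vector<uint64_t> addresses(bytes / sizeof(uint64_t));
    in.seekg(0);
    in.read(reinterpret_cast<char *>(addresses.data()), addresses.size() * sizeof(uint64_t));
    /// Each entry is the address of a covered instrumentation point; resolve
    /// them to symbols with an external tool such as addr2line.
    for (uint64_t addr : addresses)
        std::printf("0x%llx\n", static_cast<unsigned long long>(addr));
    return 0;
}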
@@ -557,7 +557,7 @@ static void sanityChecks(Server & server)
     {
         const char * filename = "/proc/sys/kernel/task_delayacct";
         if (readNumber(filename) == 0)
-            server.context()->addWarningMessage("Delay accounting is not enabled, OSIOWaitMicroseconds will not be gathered. Check " + String(filename));
+            server.context()->addWarningMessage("Delay accounting is not enabled, OSIOWaitMicroseconds will not be gathered. You can enable it using `echo 1 > " + String(filename) + "` or by using sysctl.");
     }
     catch (...) // NOLINT(bugprone-empty-catch)
     {
@@ -937,6 +937,11 @@
     </macros>
     -->

+    <!--
+    <default_replica_path>/clickhouse/tables/{database}/{table}</default_replica_path>
+    <default_replica_name>{replica}</default_replica_name>
+    -->
+
     <!-- Replica group name for database Replicated.
          The cluster created by Replicated database will consist of replicas in the same group.
          DDL queries will only wait for the replicas in the same group.
@@ -107,6 +107,7 @@ try

     if (argc < 3)
     {
+        std::cout << "A tool similar to 'su'" << std::endl;
         std::cout << "Usage: ./clickhouse su user:group ..." << std::endl;
         exit(0); // NOLINT(concurrency-mt-unsafe)
     }
@@ -14,6 +14,11 @@ macro(configure_rustc)
         set(RUST_CFLAGS "${RUST_CFLAGS} --sysroot ${CMAKE_SYSROOT}")
     endif()

+    if (CMAKE_OSX_DEPLOYMENT_TARGET)
+        set(RUST_CXXFLAGS "${RUST_CXXFLAGS} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
+        set(RUST_CFLAGS "${RUST_CFLAGS} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
+    endif()
+
     if (USE_MUSL)
         set(RUST_CXXFLAGS "${RUST_CXXFLAGS} -D_LIBCPP_HAS_MUSL_LIBC=1")
     endif ()
@@ -25,14 +30,23 @@ macro(configure_rustc)
         set(RUSTCWRAPPER "")
     endif()

-    set(RUSTFLAGS "[]")
+    set(RUSTFLAGS)
+    if (CMAKE_OSX_DEPLOYMENT_TARGET)
+        list(APPEND RUSTFLAGS "-C" "link-arg=-mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
+    endif()
+
     set(RUST_CARGO_BUILD_STD "")
     # For more info: https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html#memorysanitizer
     if (SANITIZE STREQUAL "memory")
         set(RUST_CARGO_BUILD_STD "build-std = [\"std\", \"panic_abort\", \"core\", \"alloc\"]")
-        set(RUSTFLAGS "[\"-Zsanitizer=memory\", \"-Zsanitizer-memory-track-origins\"]")
+        list(APPEND RUSTFLAGS "-Zsanitizer=memory" "-Zsanitizer-memory-track-origins")
     endif()

+    list(TRANSFORM RUSTFLAGS PREPEND "\"")
+    list(TRANSFORM RUSTFLAGS APPEND "\"")
+    list(JOIN RUSTFLAGS "," RUSTFLAGS)
+    set(RUSTFLAGS "[${RUSTFLAGS}]")
+
     message(STATUS "RUST_CFLAGS: ${RUST_CFLAGS}")
     message(STATUS "RUST_CXXFLAGS: ${RUST_CXXFLAGS}")
     message(STATUS "RUSTFLAGS: ${RUSTFLAGS}")
@@ -156,7 +156,6 @@ public:
     void read(ReadBuffer & in)
     {
        size_t new_size = 0;
-       auto * const position = in.position();
        readVarUInt(new_size, in);
        if (new_size > 100'000'000'000)
            throw DB::Exception(
@@ -174,8 +173,14 @@ public:
        }
        else
        {
-           in.position() = position; // Rollback position
-           asSingleLevel().read(in);
+           asSingleLevel().reserve(new_size);
+
+           for (size_t i = 0; i < new_size; ++i)
+           {
+               typename SingleLevelSet::Cell x;
+               x.read(in);
+               asSingleLevel().insert(x.getValue());
+           }
        }
     }

@@ -128,7 +128,10 @@ ASTPtr ConstantNode::toASTImpl(const ConvertToASTOptions & options) const
         }
     }

-    if (need_to_add_cast_function)
+    // Add cast if constant was created as a result of constant folding.
+    // Constant folding may lead to type transformation and literal on shard
+    // may have a different type.
+    if (need_to_add_cast_function || source_expression != nullptr)
     {
         auto constant_type_name_ast = std::make_shared<ASTLiteral>(constant_value->getType()->getName());
         return makeASTFunction("_CAST", std::move(constant_value_ast), std::move(constant_type_name_ast));
@@ -2171,21 +2171,45 @@ void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_
             node_to_replace = &sort_node->getExpression();

         auto * constant_node = (*node_to_replace)->as<ConstantNode>();
-        if (!constant_node || constant_node->getValue().getType() != Field::Types::UInt64)
+
+        if (!constant_node
+            || (constant_node->getValue().getType() != Field::Types::UInt64 && constant_node->getValue().getType() != Field::Types::Int64))
             continue;

-        UInt64 positional_argument_number = constant_node->getValue().get<UInt64>();
-        if (positional_argument_number == 0 || positional_argument_number > projection_nodes.size())
-            throw Exception(ErrorCodes::BAD_ARGUMENTS,
+        UInt64 pos;
+        if (constant_node->getValue().getType() == Field::Types::UInt64)
+        {
+            pos = constant_node->getValue().get<UInt64>();
+        }
+        else // Int64
+        {
+            auto value = constant_node->getValue().get<Int64>();
+            if (value > 0)
+                pos = value;
+            else
+            {
+                if (static_cast<size_t>(std::abs(value)) > projection_nodes.size())
+                    throw Exception(
+                        ErrorCodes::BAD_ARGUMENTS,
+                        "Negative positional argument number {} is out of bounds. Expected in range [-{}, -1]. In scope {}",
+                        value,
+                        projection_nodes.size(),
+                        scope.scope_node->formatASTForErrorMessage());
+                pos = projection_nodes.size() + value + 1;
+            }
+        }
+
+        if (!pos || pos > projection_nodes.size())
+            throw Exception(
+                ErrorCodes::BAD_ARGUMENTS,
                 "Positional argument number {} is out of bounds. Expected in range [1, {}]. In scope {}",
-                positional_argument_number,
+                pos,
                 projection_nodes.size(),
                 scope.scope_node->formatASTForErrorMessage());

-        --positional_argument_number;
-        *node_to_replace = projection_nodes[positional_argument_number]->clone();
-        if (auto it = resolved_expressions.find(projection_nodes[positional_argument_number]);
-            it != resolved_expressions.end())
+        --pos;
+        *node_to_replace = projection_nodes[pos]->clone();
+        if (auto it = resolved_expressions.find(projection_nodes[pos]); it != resolved_expressions.end())
         {
             resolved_expressions[*node_to_replace] = it->second;
         }
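The change above lets positional arguments also be negative, counting from the end of the projection list (-1 selects the last expression). A compact sketch of the same normalization, reduced to the index arithmetic (hypothetical helper, not the ClickHouse sources):

#include <optional>

/// Maps a 1-based positional argument (possibly negative) to a 0-based index
/// into the projection list, or nullopt when it is out of bounds.
std::optional<size_t> normalizePositionalArgument(long long value, size_t projection_count)
{
    if (value > 0 && static_cast<unsigned long long>(value) <= projection_count)
        return static_cast<size_t>(value) - 1;      /// 1 -> first projection
    if (value < 0 && static_cast<size_t>(-value) <= projection_count)
        return projection_count + value;            /// -1 -> last projection
    return std::nullopt;                            /// 0 or out of range: caller throws
}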
@@ -33,11 +33,13 @@ void BackupFactory::registerBackupEngine(const String & engine_name, const Creat

 void registerBackupEnginesFileAndDisk(BackupFactory &);
 void registerBackupEngineS3(BackupFactory &);
+void registerBackupEngineAzureBlobStorage(BackupFactory &);

 void registerBackupEngines(BackupFactory & factory)
 {
     registerBackupEnginesFileAndDisk(factory);
     registerBackupEngineS3(factory);
+    registerBackupEngineAzureBlobStorage(factory);
 }

 BackupFactory::BackupFactory()
src/Backups/BackupIO_AzureBlobStorage.cpp (new file, 320 lines)
@@ -0,0 +1,320 @@
+#include <Backups/BackupIO_AzureBlobStorage.h>
+
+#if USE_AZURE_BLOB_STORAGE
+#include <Common/quoteString.h>
+#include <Interpreters/threadPoolCallbackRunner.h>
+#include <Interpreters/Context.h>
+#include <IO/SharedThreadPools.h>
+#include <IO/HTTPHeaderEntries.h>
+#include <Storages/StorageAzureBlobCluster.h>
+#include <Disks/IO/ReadBufferFromAzureBlobStorage.h>
+#include <Disks/IO/WriteBufferFromAzureBlobStorage.h>
+#include <IO/AzureBlobStorage/copyAzureBlobStorageFile.h>
+#include <Disks/IDisk.h>
+#include <Disks/DiskType.h>
+
+#include <Poco/Util/AbstractConfiguration.h>
+
+#include <filesystem>
+
+
+namespace fs = std::filesystem;
+
+namespace DB
+{
+namespace ErrorCodes
+{
+    extern const int AZURE_BLOB_STORAGE_ERROR;
+    extern const int LOGICAL_ERROR;
+}
+
+BackupReaderAzureBlobStorage::BackupReaderAzureBlobStorage(
+    StorageAzureBlob::Configuration configuration_,
+    const ReadSettings & read_settings_,
+    const WriteSettings & write_settings_,
+    const ContextPtr & context_)
+    : BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderAzureBlobStorage"))
+    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.container, false, false}
+    , configuration(configuration_)
+{
+    auto client_ptr = StorageAzureBlob::createClient(configuration, /* is_read_only */ false);
+    object_storage = std::make_unique<AzureObjectStorage>("BackupReaderAzureBlobStorage",
+                                                          std::move(client_ptr),
+                                                          StorageAzureBlob::createSettings(context_),
+                                                          configuration_.container);
+    client = object_storage->getAzureBlobStorageClient();
+    settings = object_storage->getSettings();
+}
+
+BackupReaderAzureBlobStorage::~BackupReaderAzureBlobStorage() = default;
+
+bool BackupReaderAzureBlobStorage::fileExists(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    return object_storage->exists(StoredObject(key));
+}
+
+UInt64 BackupReaderAzureBlobStorage::getFileSize(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    ObjectMetadata object_metadata = object_storage->getObjectMetadata(key);
+    return object_metadata.size_bytes;
+}
+
+std::unique_ptr<SeekableReadBuffer> BackupReaderAzureBlobStorage::readFile(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    return std::make_unique<ReadBufferFromAzureBlobStorage>(
+        client, key, read_settings, settings->max_single_read_retries,
+        settings->max_single_download_retries);
+}
+
+void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                                                  DiskPtr destination_disk, const String & destination_path, WriteMode write_mode)
+{
+    auto destination_data_source_description = destination_disk->getDataSourceDescription();
+    if ((destination_data_source_description.type == DataSourceType::ObjectStorage)
+        && (destination_data_source_description.object_storage_type == ObjectStorageType::Azure)
+        && (destination_data_source_description.is_encrypted == encrypted_in_backup))
+    {
+        LOG_TRACE(log, "Copying {} from AzureBlobStorage to disk {}", path_in_backup, destination_disk->getName());
+        auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
+        {
+            /// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
+            if (blob_path.size() != 2 || mode != WriteMode::Rewrite)
+                throw Exception(ErrorCodes::LOGICAL_ERROR,
+                                "Blob writing function called with unexpected blob_path.size={} or mode={}",
+                                blob_path.size(), mode);
+
+            copyAzureBlobStorageFile(
+                client,
+                destination_disk->getObjectStorage()->getAzureBlobStorageClient(),
+                configuration.container,
+                fs::path(configuration.blob_path) / path_in_backup,
+                0,
+                file_size,
+                /* dest_container */ blob_path[1],
+                /* dest_path */ blob_path[0],
+                settings,
+                read_settings,
+                threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupRDAzure"),
+                /* for_disk_azure_blob_storage= */ true);
+
+            return file_size;
+        };
+
+        destination_disk->writeFileUsingBlobWritingFunction(destination_path, write_mode, write_blob_function);
+        return; /// copied!
+    }
+
+    /// Fallback to copy through buffers.
+    BackupReaderDefault::copyFileToDisk(path_in_backup, file_size, encrypted_in_backup, destination_disk, destination_path, write_mode);
+}
+
+
+BackupWriterAzureBlobStorage::BackupWriterAzureBlobStorage(
+    StorageAzureBlob::Configuration configuration_,
+    const ReadSettings & read_settings_,
+    const WriteSettings & write_settings_,
+    const ContextPtr & context_)
+    : BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterAzureBlobStorage"))
+    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.container, false, false}
+    , configuration(configuration_)
+{
+    auto client_ptr = StorageAzureBlob::createClient(configuration, /* is_read_only */ false);
+    object_storage = std::make_unique<AzureObjectStorage>("BackupWriterAzureBlobStorage",
+                                                          std::move(client_ptr),
+                                                          StorageAzureBlob::createSettings(context_),
+                                                          configuration_.container);
+    client = object_storage->getAzureBlobStorageClient();
+    settings = object_storage->getSettings();
+}
+
+void BackupWriterAzureBlobStorage::copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                                                    bool copy_encrypted, UInt64 start_pos, UInt64 length)
+{
+    /// Use the native copy as a more optimal way to copy a file from AzureBlobStorage to AzureBlobStorage if it's possible.
+    auto source_data_source_description = src_disk->getDataSourceDescription();
+    if (source_data_source_description.sameKind(data_source_description) && (source_data_source_description.is_encrypted == copy_encrypted))
+    {
+        /// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in AzureBlobStorage container.
+        /// In this case we can't use the native copy.
+        if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
+        {
+            LOG_TRACE(log, "Copying file {} from disk {} to AzureBlobStorage", src_path, src_disk->getName());
+            copyAzureBlobStorageFile(
+                src_disk->getObjectStorage()->getAzureBlobStorageClient(),
+                client,
+                /* src_container */ blob_path[1],
+                /* src_path */ blob_path[0],
+                start_pos,
+                length,
+                configuration.container,
+                fs::path(configuration.blob_path) / path_in_backup,
+                settings,
+                read_settings,
+                threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"));
+            return; /// copied!
+        }
+    }
+
+    /// Fallback to copy through buffers.
+    BackupWriterDefault::copyFileFromDisk(path_in_backup, src_disk, src_path, copy_encrypted, start_pos, length);
+}
+
+void BackupWriterAzureBlobStorage::copyFile(const String & destination, const String & source, size_t size)
+{
+    LOG_TRACE(log, "Copying file inside backup from {} to {} ", source, destination);
+    copyAzureBlobStorageFile(
+        client,
+        client,
+        configuration.container,
+        fs::path(source),
+        0,
+        size,
+        /* dest_container */ configuration.container,
+        /* dest_path */ destination,
+        settings,
+        read_settings,
+        threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"),
+        /* for_disk_azure_blob_storage= */ true);
+}
+
+void BackupWriterAzureBlobStorage::copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length)
+{
+    copyDataToAzureBlobStorageFile(create_read_buffer, start_pos, length, client, configuration.container, path_in_backup, settings,
+                                   threadPoolCallbackRunner<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"));
+}
+
+BackupWriterAzureBlobStorage::~BackupWriterAzureBlobStorage() = default;
+
+bool BackupWriterAzureBlobStorage::fileExists(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    return object_storage->exists(StoredObject(key));
+}
+
+UInt64 BackupWriterAzureBlobStorage::getFileSize(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    RelativePathsWithMetadata children;
+    object_storage->listObjects(key, children, /*max_keys*/ 0);
+    if (children.empty())
+        throw Exception(ErrorCodes::AZURE_BLOB_STORAGE_ERROR, "Object must exist");
+    return children[0].metadata.size_bytes;
+}
+
+std::unique_ptr<ReadBuffer> BackupWriterAzureBlobStorage::readFile(const String & file_name, size_t /*expected_file_size*/)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+
+    return std::make_unique<ReadBufferFromAzureBlobStorage>(
+        client, key, read_settings, settings->max_single_read_retries,
+        settings->max_single_download_retries);
+}
+
+std::unique_ptr<WriteBuffer> BackupWriterAzureBlobStorage::writeFile(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    return std::make_unique<WriteBufferFromAzureBlobStorage>(
+        client,
+        key,
+        settings->max_single_part_upload_size,
+        settings->max_unexpected_write_error_retries,
+        DBMS_DEFAULT_BUFFER_SIZE,
+        write_settings);
+}
+
+void BackupWriterAzureBlobStorage::removeFile(const String & file_name)
+{
+    String key;
+    if (startsWith(file_name, "."))
+    {
+        key = configuration.blob_path + file_name;
+    }
+    else
+    {
+        key = file_name;
+    }
+    StoredObject object(key);
+    object_storage->removeObjectIfExists(object);
+}
+
+void BackupWriterAzureBlobStorage::removeFiles(const Strings & file_names)
+{
+    StoredObjects objects;
+    for (const auto & file_name : file_names)
+        objects.emplace_back(file_name);
+
+    object_storage->removeObjectsIfExist(objects);
+}
+
+void BackupWriterAzureBlobStorage::removeFilesBatch(const Strings & file_names)
+{
+    StoredObjects objects;
+    for (const auto & file_name : file_names)
+        objects.emplace_back(file_name);
+
+    object_storage->removeObjectsIfExist(objects);
+}
+
+}
+
+#endif
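The key-resolution block at the top of fileExists, getFileSize, readFile, writeFile and removeFile above is repeated verbatim: names starting with '.' are taken relative to the backup's blob path, anything else is already a full key inside the container. A possible refactoring (hypothetical, not part of the patch) would hoist it into a single helper:

#include <string>

/// Resolve a backup-relative file name to a key inside the container.
std::string resolveKey(const std::string & blob_path, const std::string & file_name)
{
    if (!file_name.empty() && file_name.front() == '.')
        return blob_path + file_name;   /// relative to the backup location
    return file_name;                   /// already a full key
}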
src/Backups/BackupIO_AzureBlobStorage.h (new file, 68 lines)
@@ -0,0 +1,68 @@
+#pragma once
+
+#include "config.h"
+
+#if USE_AZURE_BLOB_STORAGE
+#include <Backups/BackupIO_Default.h>
+#include <Disks/DiskType.h>
+#include <Storages/StorageAzureBlobCluster.h>
+#include <Interpreters/Context_fwd.h>
+
+
+namespace DB
+{
+
+/// Represents a backup stored to Azure
+class BackupReaderAzureBlobStorage : public BackupReaderDefault
+{
+public:
+    BackupReaderAzureBlobStorage(StorageAzureBlob::Configuration configuration_, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
+    ~BackupReaderAzureBlobStorage() override;
+
+    bool fileExists(const String & file_name) override;
+    UInt64 getFileSize(const String & file_name) override;
+    std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
+
+    void copyFileToDisk(const String & path_in_backup, size_t file_size, bool encrypted_in_backup,
+                        DiskPtr destination_disk, const String & destination_path, WriteMode write_mode) override;
+
+private:
+    const DataSourceDescription data_source_description;
+    std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client;
+    StorageAzureBlob::Configuration configuration;
+    std::unique_ptr<AzureObjectStorage> object_storage;
+    std::shared_ptr<const AzureObjectStorageSettings> settings;
+};
+
+class BackupWriterAzureBlobStorage : public BackupWriterDefault
+{
+public:
+    BackupWriterAzureBlobStorage(StorageAzureBlob::Configuration configuration_, const ReadSettings & read_settings_, const WriteSettings & write_settings_, const ContextPtr & context_);
+    ~BackupWriterAzureBlobStorage() override;
+
+    bool fileExists(const String & file_name) override;
+    UInt64 getFileSize(const String & file_name) override;
+    std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
+
+    void copyDataToFile(const String & path_in_backup, const CreateReadBufferFunction & create_read_buffer, UInt64 start_pos, UInt64 length) override;
+    void copyFileFromDisk(const String & path_in_backup, DiskPtr src_disk, const String & src_path,
+                          bool copy_encrypted, UInt64 start_pos, UInt64 length) override;
+
+    void copyFile(const String & destination, const String & source, size_t size) override;
+
+    void removeFile(const String & file_name) override;
+    void removeFiles(const Strings & file_names) override;
+
+private:
+    std::unique_ptr<ReadBuffer> readFile(const String & file_name, size_t expected_file_size) override;
+    void removeFilesBatch(const Strings & file_names);
+    const DataSourceDescription data_source_description;
+    std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client;
+    StorageAzureBlob::Configuration configuration;
+    std::unique_ptr<AzureObjectStorage> object_storage;
+    std::shared_ptr<const AzureObjectStorageSettings> settings;
+};
+
+}
+
+#endif
@@ -939,12 +939,12 @@ void BackupImpl::writeFile(const BackupFileInfo & info, BackupEntryPtr entry)
     }
     else if (src_disk && from_immutable_file)
     {
-        LOG_TRACE(log, "Writing backup for file {} from {} (disk {}): data file #{}", info.data_file_name, src_file_desc, src_disk->getName(), info.data_file_index);
+        LOG_INFO(log, "Writing backup for file {} from {} (disk {}): data file #{}", info.data_file_name, src_file_desc, src_disk->getName(), info.data_file_index);
         writer->copyFileFromDisk(info.data_file_name, src_disk, src_file_path, info.encrypted_by_disk, info.base_size, info.size - info.base_size);
     }
     else
     {
-        LOG_TRACE(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, src_file_desc, info.data_file_index);
+        LOG_INFO(log, "Writing backup for file {} from {}: data file #{}", info.data_file_name, src_file_desc, info.data_file_index);
         auto create_read_buffer = [entry, read_settings = writer->getReadSettings()] { return entry->getReadBuffer(read_settings); };
         writer->copyDataToFile(info.data_file_name, create_read_buffer, info.base_size, info.size - info.base_size);
     }
@@ -57,9 +57,9 @@ namespace
         if (size_t uuid_pos = zookeeper_path_arg.find(table_uuid_str); uuid_pos != String::npos)
             zookeeper_path_arg.replace(uuid_pos, table_uuid_str.size(), "{uuid}");
     }
-    const auto & config = data.global_context->getConfigRef();
-    if ((zookeeper_path_arg == StorageReplicatedMergeTree::getDefaultZooKeeperPath(config))
-        && (replica_name_arg == StorageReplicatedMergeTree::getDefaultReplicaName(config))
+    const auto & server_settings = data.global_context->getServerSettings();
+    if ((zookeeper_path_arg == server_settings.default_replica_path.value)
+        && (replica_name_arg == server_settings.default_replica_name.value)
         && ((engine_args.size() == 2) || !engine_args[2]->as<ASTLiteral>()))
     {
         engine_args.erase(engine_args.begin(), engine_args.begin() + 2);
172
src/Backups/registerBackupEngineAzureBlobStorage.cpp
Normal file
172
src/Backups/registerBackupEngineAzureBlobStorage.cpp
Normal file
@ -0,0 +1,172 @@
|
|||||||
|
#include "config.h"
|
||||||
|
|
||||||
|
#include <Backups/BackupFactory.h>
|
||||||
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
|
#if USE_AZURE_BLOB_STORAGE
|
||||||
|
#include <Backups/BackupIO_AzureBlobStorage.h>
|
||||||
|
#include <Storages/StorageAzureBlob.h>
|
||||||
|
#include <Backups/BackupImpl.h>
|
||||||
|
#include <IO/Archives/hasRegisteredArchiveFileExtension.h>
|
||||||
|
#include <Interpreters/Context.h>
|
||||||
|
#include <Poco/Util/AbstractConfiguration.h>
|
||||||
|
#include <filesystem>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
|
extern const int SUPPORT_IS_DISABLED;
|
||||||
|
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if USE_AZURE_BLOB_STORAGE
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
String removeFileNameFromURL(String & url)
|
||||||
|
{
|
||||||
|
Poco::URI url2{url};
|
||||||
|
String path = url2.getPath();
|
||||||
|
size_t slash_pos = path.find_last_of('/');
|
||||||
|
String file_name = path.substr(slash_pos + 1);
|
||||||
|
path.resize(slash_pos + 1);
|
||||||
|
url2.setPath(path);
|
||||||
|
url = url2.toString();
|
||||||
|
return file_name;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
|
||||||
|
{
|
||||||
|
+    auto creator_fn = []([[maybe_unused]] const BackupFactory::CreateParams & params) -> std::unique_ptr<IBackup>
+    {
+#if USE_AZURE_BLOB_STORAGE
+        const String & id_arg = params.backup_info.id_arg;
+        const auto & args = params.backup_info.args;
+
+        StorageAzureBlob::Configuration configuration;
+
+        if (!id_arg.empty())
+        {
+            const auto & config = params.context->getConfigRef();
+            auto config_prefix = "named_collections." + id_arg;
+
+            if (!config.has(config_prefix))
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", id_arg);
+
+            if (config.has(config_prefix + ".connection_string"))
+            {
+                configuration.connection_url = config.getString(config_prefix + ".connection_string");
+                configuration.is_connection_string = true;
+                configuration.container = config.getString(config_prefix + ".container");
+            }
+            else
+            {
+                configuration.connection_url = config.getString(config_prefix + ".storage_account_url");
+                configuration.is_connection_string = false;
+                configuration.container = config.getString(config_prefix + ".container");
+                configuration.account_name = config.getString(config_prefix + ".account_name");
+                configuration.account_key = config.getString(config_prefix + ".account_key");
+            }
+
+            if (args.size() > 1)
+                throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Backup AzureBlobStorage requires 1 or 2 arguments: named_collection, [filename]");
+
+            if (args.size() == 1)
+                configuration.blob_path = args[0].safeGet<String>();
+        }
+        else
+        {
+            if (args.size() == 3)
+            {
+                configuration.connection_url = args[0].safeGet<String>();
+                configuration.is_connection_string = true;
+
+                configuration.container = args[1].safeGet<String>();
+                configuration.blob_path = args[2].safeGet<String>();
+            }
+            else if (args.size() == 5)
+            {
+                configuration.connection_url = args[0].safeGet<String>();
+                configuration.is_connection_string = false;
+
+                configuration.container = args[1].safeGet<String>();
+                configuration.blob_path = args[2].safeGet<String>();
+                configuration.account_name = args[3].safeGet<String>();
+                configuration.account_key = args[4].safeGet<String>();
+            }
+            else
+            {
+                throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
+                    "Backup AzureBlobStorage requires 3 or 5 arguments: connection string>/<url, container, path, [account name], [account key]");
+            }
+        }
+
+        BackupImpl::ArchiveParams archive_params;
+        if (hasRegisteredArchiveFileExtension(configuration.blob_path))
+        {
+            if (params.is_internal_backup)
+                throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Using archives with backups on clusters is disabled");
+
+            archive_params.archive_name = removeFileNameFromURL(configuration.blob_path);
+            archive_params.compression_method = params.compression_method;
+            archive_params.compression_level = params.compression_level;
+            archive_params.password = params.password;
+        }
+        else
+        {
+            if (!params.password.empty())
+                throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted");
+        }
+
+
+        if (params.open_mode == IBackup::OpenMode::READ)
+        {
+            auto reader = std::make_shared<BackupReaderAzureBlobStorage>(configuration,
+                params.read_settings,
+                params.write_settings,
+                params.context);
+
+            return std::make_unique<BackupImpl>(
+                params.backup_info,
+                archive_params,
+                params.base_backup_info,
+                reader,
+                params.context,
+                /* use_same_s3_credentials_for_base_backup*/ false);
+        }
+        else
+        {
+            auto writer = std::make_shared<BackupWriterAzureBlobStorage>(configuration,
+                params.read_settings,
+                params.write_settings,
+                params.context);
+
+            return std::make_unique<BackupImpl>(
+                params.backup_info,
+                archive_params,
+                params.base_backup_info,
+                writer,
+                params.context,
+                params.is_internal_backup,
+                params.backup_coordination,
+                params.backup_uuid,
+                params.deduplicate_files,
+                /* use_same_s3_credentials_for_base_backup */ false);
+        }
+#else
+        throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "AzureBlobStorage support is disabled");
+#endif
+    };
+
+    factory.registerBackupEngine("AzureBlobStorage", creator_fn);
+}
+
+}
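The entire block above is new code: the creator lambda for the new AzureBlobStorage backup engine, registered in BackupFactory under the engine's name so that a statement such as `BACKUP ... TO AzureBlobStorage(...)` resolves to this lambda. As a rough, self-contained sketch of that name-keyed registration mechanism only — the `IBackup`, `CreateParams`, and `Example` names below are simplified stand-ins, not the real ClickHouse declarations:

// Minimal stand-alone sketch of a name-keyed factory of creator lambdas.
// All names here are illustrative stand-ins for the ClickHouse originals.
#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

struct IBackup { virtual ~IBackup() = default; };

struct CreateParams { std::vector<std::string> args; };

class BackupFactorySketch
{
public:
    using CreatorFn = std::function<std::unique_ptr<IBackup>(const CreateParams &)>;

    void registerBackupEngine(const std::string & name, CreatorFn fn)
    {
        if (!creators.emplace(name, std::move(fn)).second)
            throw std::logic_error("Backup engine '" + name + "' is already registered");
    }

    std::unique_ptr<IBackup> create(const std::string & name, const CreateParams & params) const
    {
        auto it = creators.find(name);
        if (it == creators.end())
            throw std::runtime_error("Unknown backup engine '" + name + "'");
        return it->second(params);  // dispatch to the registered creator
    }

private:
    std::map<std::string, CreatorFn> creators;
};

struct ExampleBackup : IBackup {};

int main()
{
    BackupFactorySketch factory;
    factory.registerBackupEngine("Example", [](const CreateParams &) -> std::unique_ptr<IBackup>
    {
        return std::make_unique<ExampleBackup>();
    });
    auto backup = factory.create("Example", CreateParams{});
}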
@ -89,6 +89,7 @@ add_headers_and_sources(clickhouse_common_io Common/SSH)
 add_headers_and_sources(clickhouse_common_io IO)
 add_headers_and_sources(clickhouse_common_io IO/Archives)
 add_headers_and_sources(clickhouse_common_io IO/S3)
+add_headers_and_sources(clickhouse_common_io IO/AzureBlobStorage)
 list (REMOVE_ITEM clickhouse_common_io_sources Common/malloc.cpp Common/new_delete.cpp)


@ -141,6 +142,7 @@ endif()

 if (TARGET ch_contrib::azure_sdk)
     add_headers_and_sources(dbms Disks/ObjectStorages/AzureBlobStorage)
+    add_headers_and_sources(dbms IO/AzureBlobStorage)
 endif()

 if (TARGET ch_contrib::hdfs)
@ -496,6 +498,7 @@ if (TARGET ch_contrib::aws_s3)
 endif()

 if (TARGET ch_contrib::azure_sdk)
+    target_link_libraries (clickhouse_common_io PUBLIC ch_contrib::azure_sdk)
     dbms_target_link_libraries (PRIVATE ch_contrib::azure_sdk)
 endif()

@ -19,7 +19,6 @@
 #include <Storages/MergeTree/RequestResponse.h>

-#include <atomic>
 #include <optional>

 #include "config.h"
@ -27,6 +27,9 @@ class IConnectionPool : private boost::noncopyable
 public:
     using Entry = PoolBase<Connection>::Entry;

+    IConnectionPool() = default;
+    IConnectionPool(String host_, UInt16 port_) : host(host_), port(port_), address(host + ":" + toString(port_)) {}
+
     virtual ~IConnectionPool() = default;

     /// Selects the connection to work.
@ -36,7 +39,15 @@ public:
         const Settings & settings,
         bool force_connected = true) = 0;

+    const std::string & getHost() const { return host; }
+    UInt16 getPort() const { return port; }
+    const String & getAddress() const { return address; }
     virtual Priority getPriority() const { return Priority{1}; }
+
+protected:
+    const String host;
+    const UInt16 port = 0;
+    const String address;
 };

 using ConnectionPoolPtr = std::shared_ptr<IConnectionPool>;
@ -63,10 +74,9 @@ public:
         Protocol::Compression compression_,
         Protocol::Secure secure_,
         Priority priority_ = Priority{1})
-        : Base(max_connections_,
+        : IConnectionPool(host_, port_),
+          Base(max_connections_,
             getLogger("ConnectionPool (" + host_ + ":" + toString(port_) + ")")),
-        host(host_),
-        port(port_),
         default_database(default_database_),
         user(user_),
         password(password_),
@ -99,10 +109,6 @@ public:
         return entry;
     }

-    const std::string & getHost() const
-    {
-        return host;
-    }
     std::string getDescription() const
     {
         return host + ":" + toString(port);
@ -125,8 +131,6 @@ protected:
     }

 private:
-    String host;
-    UInt16 port;
     String default_database;
     String user;
     String password;
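The refactoring in these hunks hoists `host`, `port`, and a precomputed `address` from `ConnectionPool` into the `IConnectionPool` base, so every pool exposes `getHost()`/`getPort()` without each subclass duplicating the state. A minimal stand-alone sketch of the same shape, with simplified stand-in types rather than the ClickHouse classes:

// Stand-alone sketch: shared state moves to the base class, and the derived
// constructor forwards it up instead of storing its own copies.
#include <cstdint>
#include <iostream>
#include <string>

class ConnectionPoolBase
{
public:
    ConnectionPoolBase() = default;
    ConnectionPoolBase(std::string host_, uint16_t port_)
        : host(std::move(host_)), port(port_), address(host + ":" + std::to_string(port)) {}

    virtual ~ConnectionPoolBase() = default;

    const std::string & getHost() const { return host; }
    uint16_t getPort() const { return port; }
    const std::string & getAddress() const { return address; }

protected:
    const std::string host;
    const uint16_t port = 0;
    const std::string address;
};

class ConnectionPool : public ConnectionPoolBase
{
public:
    ConnectionPool(std::string host_, uint16_t port_, std::string database_)
        : ConnectionPoolBase(std::move(host_), port_)   // shared state lives in the base
        , default_database(std::move(database_))
    {
    }

    std::string getDescription() const { return getAddress(); }

private:
    std::string default_database;
};

int main()
{
    ConnectionPool pool("localhost", 9000, "default");
    std::cout << pool.getDescription() << '\n';  // prints "localhost:9000"
}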
@ -1,7 +1,5 @@
 #pragma once

-#include <compare>
-
 #include <Client/Connection.h>
 #include <Storages/MergeTree/RequestResponse.h>

@ -203,6 +203,7 @@ void ColumnFixedString::updatePermutation(IColumn::PermutationSortDirection dire
 void ColumnFixedString::insertRangeFrom(const IColumn & src, size_t start, size_t length)
 {
     const ColumnFixedString & src_concrete = assert_cast<const ColumnFixedString &>(src);
+    chassert(this->n == src_concrete.n);

     if (start + length > src_concrete.size())
         throw Exception(ErrorCodes::PARAMETER_OUT_OF_BOUND, "Parameters start = {}, length = {} are out of bound "
@ -130,15 +130,21 @@ public:
     int compareAt(size_t p1, size_t p2, const IColumn & rhs_, int /*nan_direction_hint*/) const override
     {
         const ColumnFixedString & rhs = assert_cast<const ColumnFixedString &>(rhs_);
+        chassert(this->n == rhs.n);
         return memcmpSmallAllowOverflow15(chars.data() + p1 * n, rhs.chars.data() + p2 * n, n);
     }

-    void compareColumn(const IColumn & rhs, size_t rhs_row_num,
-                       PaddedPODArray<UInt64> * row_indexes, PaddedPODArray<Int8> & compare_results,
-                       int direction, int nan_direction_hint) const override
+    void compareColumn(
+        const IColumn & rhs_,
+        size_t rhs_row_num,
+        PaddedPODArray<UInt64> * row_indexes,
+        PaddedPODArray<Int8> & compare_results,
+        int direction,
+        int nan_direction_hint) const override
     {
-        return doCompareColumn<ColumnFixedString>(assert_cast<const ColumnFixedString &>(rhs), rhs_row_num, row_indexes,
-                                                  compare_results, direction, nan_direction_hint);
+        const ColumnFixedString & rhs = assert_cast<const ColumnFixedString &>(rhs_);
+        chassert(this->n == rhs.n);
+        return doCompareColumn<ColumnFixedString>(rhs, rhs_row_num, row_indexes, compare_results, direction, nan_direction_hint);
     }

     bool hasEqualValues() const override
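Both comparison paths now assert that the two fixed-string columns share the same value width `n` before doing a bytewise compare. A stand-alone sketch of why that invariant matters for fixed-width columns — a simplified stand-in type, with `assert` playing the role of the debug-only `chassert`:

// Stand-alone sketch: fixed-width rows can only be compared bytewise when
// both columns have the same width, so the width check comes first.
#include <cassert>
#include <cstring>
#include <vector>

struct FixedStringColumn
{
    size_t n = 0;                 // width of every value, in bytes
    std::vector<char> chars;      // rows concatenated back to back

    int compareAt(size_t p1, size_t p2, const FixedStringColumn & rhs) const
    {
        assert(n == rhs.n);       // debug-only guard, like chassert in the hunk
        return std::memcmp(chars.data() + p1 * n, rhs.chars.data() + p2 * n, n);
    }
};

int main()
{
    FixedStringColumn a{3, {'a', 'b', 'c', 'a', 'b', 'd'}};
    return a.compareAt(0, 1, a) < 0 ? 0 : 1;  // "abc" < "abd"
}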
@ -593,6 +593,7 @@
     M(711, FILECACHE_ACCESS_DENIED) \
     M(712, TOO_MANY_MATERIALIZED_VIEWS) \
     M(713, BROKEN_PROJECTION) \
+    M(714, UNEXPECTED_CLUSTER) \
     \
     M(999, KEEPER_EXCEPTION) \
     M(1000, POCO_EXCEPTION) \
@ -2,6 +2,8 @@

 #include <memory>

+#include <base/defines.h>
+
 #include <Poco/Channel.h>
 #include <Poco/Logger.h>
 #include <Poco/Message.h>
@ -24,6 +26,16 @@ using LoggerRawPtr = Poco::Logger *;
  */
 LoggerPtr getLogger(const std::string & name);

+/** Get Logger with specified name. If the Logger does not exists, it is created.
+  * This overload was added for specific purpose, when logger is constructed from constexpr string.
+  * Logger is destroyed only during program shutdown.
+  */
+template <size_t n>
+ALWAYS_INLINE LoggerPtr getLogger(const char (&name)[n])
+{
+    return Poco::Logger::getShared(name, false /*should_be_owned_by_shared_ptr_if_created*/);
+}
+
 /** Create Logger with specified name, channel and logging level.
   * If Logger already exists, throws exception.
   * Logger is destroyed, when last shared ptr that refers to Logger with specified name is destroyed.
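The new overload binds only to character-array arguments, which in practice means string literals, so loggers constructed from compile-time names can take a different ownership path than names built at runtime. A self-contained sketch of the overload-resolution trick itself — the function names and return values below are invented for illustration:

// Stand-alone sketch: a reference to a char array binds to string literals,
// beating the std::string overload (which would need a conversion).
#include <iostream>
#include <string>

std::string get(const std::string & name)
{
    return "runtime-string path: " + name;
}

template <size_t n>
std::string get(const char (&name)[n])
{
    // Chosen for literals: the length n is known at compile time.
    return "literal path (" + std::to_string(n - 1) + " chars): " + name;
}

int main()
{
    std::cout << get("query_log") << '\n';              // literal overload
    std::cout << get(std::string("query_log")) << '\n'; // std::string overload
}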
@ -175,15 +175,6 @@ String Macros::expand(const String & s) const
     return expand(s, info);
 }

-String Macros::expand(const String & s, const StorageID & table_id, bool allow_uuid) const
-{
-    MacroExpansionInfo info;
-    info.table_id = table_id;
-    if (!allow_uuid)
-        info.table_id.uuid = UUIDHelpers::Nil;
-    return expand(s, info);
-}
-
 Names Macros::expand(const Names & source_names, size_t level) const
 {
     Names result_names;
@ -57,8 +57,6 @@ public:

     String expand(const String & s) const;

-    String expand(const String & s, const StorageID & table_id, bool allow_uuid) const;
-
     /** Apply expand for the list.
       */
@ -13,6 +13,9 @@
 #undef __msan_unpoison_string

 #define __msan_unpoison(X, Y) /// NOLINT
+/// Given a pointer and **its size**, unpoisons 15 bytes **at the end**
+/// See memcmpSmall.h / memcpySmall.h
+#define __msan_unpoison_overflow_15(X, Y) /// NOLINT
 #define __msan_test_shadow(X, Y) (false) /// NOLINT
 #define __msan_print_shadow(X, Y) /// NOLINT
 #define __msan_unpoison_string(X) /// NOLINT
@ -24,6 +27,8 @@
 #    undef __msan_print_shadow
 #    undef __msan_unpoison_string
 #    include <sanitizer/msan_interface.h>
+#    undef __msan_unpoison_overflow_15
+#    define __msan_unpoison_overflow_15(PTR, PTR_SIZE) __msan_unpoison(&(PTR)[(PTR_SIZE)], 15)
 #    endif
 #endif

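This is the usual sanitizer-shim pattern: the macro is a no-op by default and is redefined to a real `__msan_unpoison` call only when the build actually runs under MemorySanitizer. A minimal sketch of the same shape, assuming Clang's `__has_feature(memory_sanitizer)` as the detection mechanism (which is how MSan advertises itself):

// Minimal sketch of a sanitizer shim macro: no-op normally, a real call under
// MemorySanitizer. Compiles and runs both with and without -fsanitize=memory.
#include <cstring>

#if defined(__has_feature)
#    if __has_feature(memory_sanitizer)
#        include <sanitizer/msan_interface.h>
#        define UNPOISON_TAIL_15(PTR, SIZE) __msan_unpoison(&(PTR)[(SIZE)], 15)
#    endif
#endif

#ifndef UNPOISON_TAIL_15
#    define UNPOISON_TAIL_15(PTR, SIZE) /* no-op without MSan */
#endif

int main()
{
    char buf[32];
    std::memset(buf, 0, 16);   // only the first 16 bytes are initialized
    UNPOISON_TAIL_15(buf, 16); // tell MSan a deliberate 15-byte overread is fine
    return buf[0];
}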
@ -8,6 +8,7 @@
     M(Query, "Number of queries to be interpreted and potentially executed. Does not include queries that failed to parse or were rejected due to AST size limits, quota limits or limits on the number of simultaneously running queries. May include internal queries initiated by ClickHouse itself. Does not count subqueries.") \
     M(SelectQuery, "Same as Query, but only for SELECT queries.") \
     M(InsertQuery, "Same as Query, but only for INSERT queries.") \
+    M(InitialQuery, "Same as Query, but only counts initial queries (see is_initial_query).")\
     M(QueriesWithSubqueries, "Count queries with all subqueries") \
     M(SelectQueriesWithSubqueries, "Count SELECT queries with all subqueries") \
     M(InsertQueriesWithSubqueries, "Count INSERT queries with all subqueries") \
@ -384,6 +385,10 @@ The server successfully detected this situation and will download merged part fr
     M(S3PutObject, "Number of S3 API PutObject calls.") \
     M(S3GetObject, "Number of S3 API GetObject calls.") \
     \
+    M(AzureUploadPart, "Number of Azure blob storage API UploadPart calls") \
+    M(DiskAzureUploadPart, "Number of Disk Azure blob storage API UploadPart calls") \
+    M(AzureCopyObject, "Number of Azure blob storage API CopyObject calls") \
+    M(DiskAzureCopyObject, "Number of Disk Azure blob storage API CopyObject calls") \
     M(AzureDeleteObjects, "Number of Azure blob storage API DeleteObject(s) calls.") \
     M(AzureListObjects, "Number of Azure blob storage API ListObjects calls.") \
     \
@ -625,6 +630,8 @@ The server successfully detected this situation and will download merged part fr
     M(InterfacePostgreSQLReceiveBytes, "Number of bytes received through PostgreSQL interfaces") \
     \
     M(ParallelReplicasUsedCount, "Number of replicas used to execute a query with task-based parallel replicas") \
+    M(ParallelReplicasAvailableCount, "Number of replicas available to execute a query with task-based parallel replicas") \
+    M(ParallelReplicasUnavailableCount, "Number of replicas which was chosen, but found to be unavailable during query execution with task-based parallel replicas") \

 #ifdef APPLY_FOR_EVENTS
 #define APPLY_FOR_EVENTS(M) APPLY_FOR_BUILTIN_EVENTS(M) APPLY_FOR_EXTERNAL_EVENTS(M)
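These counters live in an X-macro list: each `M(name, doc)` entry is expanded several times (into an enum, into documentation, and so on), so adding a counter is a one-line diff like the ones above. A stand-alone sketch of the pattern with invented counters:

// Stand-alone sketch of the X-macro pattern behind ProfileEvents: one list of
// M(name, doc) entries is expanded twice, once into an enum and once into a
// documentation table.
#include <cstdio>

#define APPLY_FOR_EVENTS(M) \
    M(Query, "Number of queries") \
    M(SelectQuery, "Only SELECT queries") \
    M(InsertQuery, "Only INSERT queries")

enum Event
{
#define M(NAME, DOC) NAME,
    APPLY_FOR_EVENTS(M)
#undef M
    END
};

static const char * event_docs[] =
{
#define M(NAME, DOC) DOC,
    APPLY_FOR_EVENTS(M)
#undef M
};

int main()
{
    for (int i = 0; i < END; ++i)
        std::printf("%d: %s\n", i, event_docs[i]);
}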
@ -4,6 +4,7 @@
 #include <base/constexpr_helpers.h>
 #include <base/demangle.h>

+#include <Common/scope_guard_safe.h>
 #include <Common/Dwarf.h>
 #include <Common/Elf.h>
 #include <Common/MemorySanitizer.h>
@ -24,6 +25,15 @@

 #include "config.h"

+#include <boost/algorithm/string/split.hpp>
+
+#if defined(OS_DARWIN)
+/// This header contains functions like `backtrace` and `backtrace_symbols`
+/// Which will be used for stack unwinding on Mac.
+/// Read: https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/backtrace.3.html
+#include "execinfo.h"
+#endif
+
 namespace
 {
 /// Currently this variable is set up once on server startup.
@ -262,6 +272,33 @@ void StackTrace::forEachFrame(
         callback(current_inline_frame);
     }

+    callback(current_frame);
+}
+#elif defined(OS_DARWIN)
+    UNUSED(fatal);
+
+    /// This function returns an array of string in a special (a little bit weird format)
+    /// The frame number, library name, address in hex, mangled symbol name, `+` sign, the offset.
+    char** strs = ::backtrace_symbols(frame_pointers.data(), static_cast<int>(size));
+    SCOPE_EXIT_SAFE({free(strs);});
+
+    for (size_t i = offset; i < size; ++i)
+    {
+        StackTrace::Frame current_frame;
+
+        std::vector<std::string> split;
+        boost::split(split, strs[i], isWhitespaceASCII);
+        split.erase(
+            std::remove_if(
+                split.begin(), split.end(),
+                [](const std::string & x) { return x.empty(); }),
+            split.end());
+        assert(split.size() == 6);
+
+        current_frame.virtual_addr = frame_pointers[i];
+        current_frame.physical_addr = frame_pointers[i];
+        current_frame.object = split[1];
+        current_frame.symbol = split[3];
         callback(current_frame);
     }
 #else
@ -306,7 +343,11 @@ StackTrace::StackTrace(const ucontext_t & signal_context)

 void StackTrace::tryCapture()
 {
+#if defined(OS_DARWIN)
+    size = backtrace(frame_pointers.data(), capacity);
+#else
     size = unw_backtrace(frame_pointers.data(), capacity);
+#endif
     __msan_unpoison(frame_pointers.data(), size * sizeof(frame_pointers[0]));
 }

@ -376,7 +417,7 @@ toStringEveryLineImpl([[maybe_unused]] bool fatal, const StackTraceRefTriple & s
         return callback("<Empty trace>");

     size_t frame_index = stack_trace.offset;
-#if defined(__ELF__) && !defined(OS_FREEBSD)
+#if (defined(__ELF__) && !defined(OS_FREEBSD)) || defined(OS_DARWIN)
     size_t inline_frame_index = 0;
     auto callback_wrapper = [&](const StackTrace::Frame & frame)
     {
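The new Darwin branch symbolizes frames by parsing the text that libc's `backtrace_symbols` returns: per the backtrace(3) man page that is six whitespace-separated fields, with the image name second and the mangled symbol fourth — exactly what the `split[1]`/`split[3]` accesses above rely on. A stand-alone POSIX sketch of the same approach (the Darwin field positions are that documented assumption; on Linux the line format differs, so the fallback branch just prints the raw line):

// Stand-alone sketch of capturing and symbolizing a stack via execinfo.h.
#include <execinfo.h>
#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>
#include <vector>

void printCurrentStack()
{
    void * frames[64];
    int size = ::backtrace(frames, 64);

    char ** strs = ::backtrace_symbols(frames, size);
    if (strs == nullptr)
        return;

    for (int i = 0; i < size; ++i)
    {
        // Split one line on whitespace, dropping empty tokens.
        std::istringstream line(strs[i]);
        std::vector<std::string> fields;
        for (std::string token; line >> token;)
            fields.push_back(token);

        // On Darwin: fields[1] is the image name, fields[3] the mangled symbol.
        if (fields.size() >= 4)
            std::printf("frame %d: %s in %s\n", i, fields[3].c_str(), fields[1].c_str());
        else
            std::printf("frame %d: %s\n", i, strs[i]);
    }

    free(strs);  // backtrace_symbols returns one malloc'ed block
}

int main()
{
    printCurrentStack();
}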
@ -401,6 +401,9 @@ ZooKeeper::ZooKeeper(
     keeper_feature_flags.logFlags(log);

     ProfileEvents::increment(ProfileEvents::ZooKeeperInit);
+
+    /// Avoid stale reads after connecting
+    sync("/", [](const SyncResponse &){});
 }
 catch (...)
 {
@ -7,6 +7,7 @@
 #include <base/simd.h>

 #include <Core/Defines.h>
+#include <Common/MemorySanitizer.h>


 namespace detail
@ -26,9 +27,8 @@ inline int cmp(T a, T b)


 /// We can process uninitialized memory in the functions below.
-/// Results don't depend on the values inside uninitialized memory but Memory Sanitizer cannot see it.
-/// Disable optimized functions if compile with Memory Sanitizer.
-#if defined(__AVX512BW__) && defined(__AVX512VL__) && !defined(MEMORY_SANITIZER)
+/// Results don't depend on the values inside uninitialized memory
+#if defined(__AVX512BW__) && defined(__AVX512VL__)
 #    include <immintrin.h>


@ -42,6 +42,9 @@ inline int cmp(T a, T b)
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -74,6 +77,9 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
 template <typename Char>
 inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -144,6 +150,9 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t size)
 {
+    __msan_unpoison_overflow_15(a, size);
+    __msan_unpoison_overflow_15(b, size);
+
     for (size_t offset = 0; offset < size; offset += 16)
     {
         uint16_t mask = _mm_cmp_epi8_mask(
@ -174,6 +183,9 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
     if (a_size != b_size)
         return false;

+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     for (size_t offset = 0; offset < a_size; offset += 16)
     {
         uint16_t mask = _mm_cmp_epi8_mask(
@ -246,6 +258,7 @@ inline bool memequal16(const void * a, const void * b)
 /** Compare memory region to zero */
 inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
 {
+    __msan_unpoison_overflow_15(reinterpret_cast<const char *>(data), size);
     const __m128i zero16 = _mm_setzero_si128();

     for (size_t offset = 0; offset < size; offset += 16)
@ -263,7 +276,7 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
     return true;
 }

-#elif defined(__SSE2__) && !defined(MEMORY_SANITIZER)
+#elif defined(__SSE2__)
 #    include <emmintrin.h>


@ -277,6 +290,9 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -309,6 +325,9 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
 template <typename Char>
 inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -380,6 +399,9 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t size)
 {
+    __msan_unpoison_overflow_15(a, size);
+    __msan_unpoison_overflow_15(b, size);
+
     for (size_t offset = 0; offset < size; offset += 16)
     {
         uint16_t mask = _mm_movemask_epi8(_mm_cmpeq_epi8(
@ -410,6 +432,9 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
     if (a_size != b_size)
         return false;

+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     for (size_t offset = 0; offset < a_size; offset += 16)
     {
         uint16_t mask = _mm_movemask_epi8(_mm_cmpeq_epi8(
@ -483,6 +508,8 @@ inline bool memequal16(const void * a, const void * b)
 /** Compare memory region to zero */
 inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
 {
+    __msan_unpoison_overflow_15(reinterpret_cast<const char *>(data), size);
+
     const __m128i zero16 = _mm_setzero_si128();

     for (size_t offset = 0; offset < size; offset += 16)
@ -509,6 +536,9 @@ inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -534,6 +564,9 @@ inline int memcmpSmallAllowOverflow15(const Char * a, size_t a_size, const Char
 template <typename Char>
 inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_size, const Char * b, size_t b_size)
 {
+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     size_t min_size = std::min(a_size, b_size);

     for (size_t offset = 0; offset < min_size; offset += 16)
@ -599,6 +632,9 @@ inline int memcmpSmallLikeZeroPaddedAllowOverflow15(const Char * a, size_t a_siz
 template <typename Char>
 inline int memcmpSmallAllowOverflow15(const Char * a, const Char * b, size_t size)
 {
+    __msan_unpoison_overflow_15(a, size);
+    __msan_unpoison_overflow_15(b, size);
+
     for (size_t offset = 0; offset < size; offset += 16)
     {
         uint64_t mask = getNibbleMask(vceqq_u8(
@ -625,6 +661,9 @@ inline bool memequalSmallAllowOverflow15(const Char * a, size_t a_size, const Ch
     if (a_size != b_size)
         return false;

+    __msan_unpoison_overflow_15(a, a_size);
+    __msan_unpoison_overflow_15(b, b_size);
+
     for (size_t offset = 0; offset < a_size; offset += 16)
     {
         uint64_t mask = getNibbleMask(vceqq_u8(
@ -683,6 +722,7 @@ inline bool memequal16(const void * a, const void * b)

 inline bool memoryIsZeroSmallAllowOverflow15(const void * data, size_t size)
 {
+    __msan_unpoison_overflow_15(reinterpret_cast<const char *>(data), size);
     for (size_t offset = 0; offset < size; offset += 16)
     {
         uint64_t mask = getNibbleMask(vceqzq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(data) + offset)));
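Every hunk above inserts the same two lines at the top of each kernel: these functions deliberately read up to 15 bytes past the end of their inputs (they scan in 16-byte strides over padded buffers), so under MemorySanitizer that tail is now explicitly unpoisoned instead of the SIMD paths being compiled out, as the removed `!defined(MEMORY_SANITIZER)` guards used to do. A stand-alone SSE2 sketch of the overflow-15 idea itself, with the required padding made explicit (simplified, not the ClickHouse kernels):

// Stand-alone sketch of "allow overflow 15": the loop reads whole 16-byte
// chunks past the logical end; differences in the tail never affect the result.
// Requires SSE2 (x86-64); the 32-byte arrays provide the readable padding.
#include <emmintrin.h>
#include <cassert>
#include <cstdint>

/// Compare two equal-length regions whose allocations have >= 15 readable
/// bytes after them.
static bool equalAllowOverflow15(const char * a, const char * b, size_t size)
{
    for (size_t offset = 0; offset < size; offset += 16)
    {
        uint16_t mask = _mm_movemask_epi8(_mm_cmpeq_epi8(
            _mm_loadu_si128(reinterpret_cast<const __m128i *>(a + offset)),
            _mm_loadu_si128(reinterpret_cast<const __m128i *>(b + offset))));
        mask = ~mask;
        if (mask)
        {
            offset += __builtin_ctz(mask);  // index of first differing byte
            return offset >= size;          // differences in the tail don't count
        }
    }
    return true;
}

int main()
{
    alignas(16) char a[32] = "hello";
    alignas(16) char b[32] = "hellO";
    assert(equalAllowOverflow15(a, a, 5));
    assert(!equalAllowOverflow15(a, b, 5));
}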
@ -1,5 +1,7 @@
 #pragma once

+#include <Common/MemorySanitizer.h>
+
 #include <cstring>
 #include <sys/types.h> /// ssize_t

@ -38,6 +40,7 @@ namespace detail
 {
     inline void memcpySmallAllowReadWriteOverflow15Impl(char * __restrict dst, const char * __restrict src, ssize_t n)
     {
+        __msan_unpoison_overflow_15(src, n);
         while (n > 0)
         {
             _mm_storeu_si128(reinterpret_cast<__m128i *>(dst),
@ -64,6 +67,7 @@ namespace detail
 {
     inline void memcpySmallAllowReadWriteOverflow15Impl(char * __restrict dst, const char * __restrict src, ssize_t n)
     {
+        __msan_unpoison_overflow_15(src, n);
         while (n > 0)
        {
             vst1q_s8(reinterpret_cast<signed char *>(dst), vld1q_s8(reinterpret_cast<const signed char *>(src)));
@ -9,6 +9,7 @@
 #include <Poco/NullChannel.h>
 #include <Poco/StreamChannel.h>
 #include <sstream>
+#include <thread>


 TEST(Logger, Log)
@ -100,3 +101,75 @@ TEST(Logger, SideEffects)

     LOG_TRACE(log, "test no throw {}", getLogMessageParamOrThrow());
 }
+
+TEST(Logger, SharedRawLogger)
+{
+    {
+        std::ostringstream stream; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
+        auto stream_channel = Poco::AutoPtr<Poco::StreamChannel>(new Poco::StreamChannel(stream));
+
+        auto shared_logger = getLogger("Logger_1");
+        shared_logger->setChannel(stream_channel.get());
+        shared_logger->setLevel("trace");
+
+        LOG_TRACE(shared_logger, "SharedLogger1Log1");
+        LOG_TRACE(getRawLogger("Logger_1"), "RawLogger1Log");
+        LOG_TRACE(shared_logger, "SharedLogger1Log2");
+
+        auto actual = stream.str();
+        EXPECT_EQ(actual, "SharedLogger1Log1\nRawLogger1Log\nSharedLogger1Log2\n");
+    }
+    {
+        std::ostringstream stream; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
+        auto stream_channel = Poco::AutoPtr<Poco::StreamChannel>(new Poco::StreamChannel(stream));
+
+        auto * raw_logger = getRawLogger("Logger_2");
+        raw_logger->setChannel(stream_channel.get());
+        raw_logger->setLevel("trace");
+
+        LOG_TRACE(getLogger("Logger_2"), "SharedLogger2Log1");
+        LOG_TRACE(raw_logger, "RawLogger2Log");
+        LOG_TRACE(getLogger("Logger_2"), "SharedLogger2Log2");
+
+        auto actual = stream.str();
+        EXPECT_EQ(actual, "SharedLogger2Log1\nRawLogger2Log\nSharedLogger2Log2\n");
+    }
+}
+
+TEST(Logger, SharedLoggersThreadSafety)
+{
+    static size_t threads_count = std::thread::hardware_concurrency();
+    static constexpr size_t loggers_count = 10;
+    static constexpr size_t logger_get_count = 1000;
+
+    Poco::Logger::root();
+
+    std::vector<std::string> names;
+
+    Poco::Logger::names(names);
+    size_t loggers_size_before = names.size();
+
+    std::vector<std::thread> threads;
+
+    for (size_t thread_index = 0; thread_index < threads_count; ++thread_index)
+    {
+        threads.emplace_back([]()
+        {
+            for (size_t logger_index = 0; logger_index < loggers_count; ++logger_index)
+            {
+                for (size_t iteration = 0; iteration < logger_get_count; ++iteration)
+                {
+                    getLogger("Logger_" + std::to_string(logger_index));
+                }
+            }
+        });
+    }
+
+    for (auto & thread : threads)
+        thread.join();
+
+    Poco::Logger::names(names);
+    size_t loggers_size_after = names.size();
+
+    EXPECT_EQ(loggers_size_before, loggers_size_after);
+}
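The second test is a generic stress pattern: hammer the registry from `hardware_concurrency()` threads, then check that the number of registered loggers is unchanged once all temporary `shared_ptr`s are gone. The same shape works for any shared-ownership registry; here is a self-contained sketch using a mutex-guarded map of `weak_ptr` (toy types, not the Poco implementation):

// Stand-alone sketch of the stress pattern above, applied to a toy registry
// that hands out shared_ptr entries and drops them when unused.
#include <cassert>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

class Registry
{
public:
    std::shared_ptr<std::string> get(const std::string & name)
    {
        std::lock_guard lock(mutex);
        auto & weak = entries[name];
        if (auto existing = weak.lock())
            return existing;                    // reuse the live entry
        auto created = std::make_shared<std::string>(name);
        weak = created;
        return created;
    }

    size_t size()
    {
        std::lock_guard lock(mutex);
        size_t alive = 0;
        for (auto & [_, weak] : entries)
            alive += !weak.expired();
        return alive;
    }

private:
    std::mutex mutex;
    std::map<std::string, std::weak_ptr<std::string>> entries;
};

int main()
{
    Registry registry;
    std::vector<std::thread> threads;
    for (size_t t = 0; t < std::thread::hardware_concurrency(); ++t)
        threads.emplace_back([&]
        {
            for (size_t i = 0; i < 10; ++i)
                for (size_t iter = 0; iter < 1000; ++iter)
                    registry.get("Logger_" + std::to_string(i));  // entry dies immediately
        });
    for (auto & thread : threads)
        thread.join();
    assert(registry.size() == 0);  // nothing leaked once all shared_ptrs are gone
}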
@ -16,7 +16,7 @@
 #include <Coordination/pathUtils.h>
 #include <Coordination/KeeperConstants.h>
 #include <Common/ZooKeeper/ZooKeeperCommon.h>
-#include "Core/Field.h"
+#include <Core/Field.h>
 #include <Disks/DiskLocal.h>


@ -79,20 +79,20 @@ namespace
         writeBinary(false, out);

         /// Serialize stat
-        writeBinary(node.stat.czxid, out);
-        writeBinary(node.stat.mzxid, out);
-        writeBinary(node.stat.ctime, out);
-        writeBinary(node.stat.mtime, out);
-        writeBinary(node.stat.version, out);
-        writeBinary(node.stat.cversion, out);
-        writeBinary(node.stat.aversion, out);
-        writeBinary(node.stat.ephemeralOwner, out);
+        writeBinary(node.czxid, out);
+        writeBinary(node.mzxid, out);
+        writeBinary(node.ctime(), out);
+        writeBinary(node.mtime, out);
+        writeBinary(node.version, out);
+        writeBinary(node.cversion, out);
+        writeBinary(node.aversion, out);
+        writeBinary(node.ephemeralOwner(), out);
         if (version < SnapshotVersion::V6)
-            writeBinary(static_cast<int32_t>(node.getData().size()), out);
-        writeBinary(node.stat.numChildren, out);
-        writeBinary(node.stat.pzxid, out);
+            writeBinary(static_cast<int32_t>(node.data_size), out);
+        writeBinary(node.numChildren(), out);
+        writeBinary(node.pzxid, out);

-        writeBinary(node.seq_num, out);
+        writeBinary(node.seqNum(), out);

         if (version >= SnapshotVersion::V4 && version <= SnapshotVersion::V5)
             writeBinary(node.sizeInBytes(), out);
@ -102,7 +102,7 @@ namespace
     {
         String new_data;
         readBinary(new_data, in);
-        node.setData(std::move(new_data));
+        node.setData(new_data);

         if (version >= SnapshotVersion::V1)
         {
@ -138,22 +138,36 @@ namespace
         }

         /// Deserialize stat
-        readBinary(node.stat.czxid, in);
-        readBinary(node.stat.mzxid, in);
-        readBinary(node.stat.ctime, in);
-        readBinary(node.stat.mtime, in);
-        readBinary(node.stat.version, in);
-        readBinary(node.stat.cversion, in);
-        readBinary(node.stat.aversion, in);
-        readBinary(node.stat.ephemeralOwner, in);
+        readBinary(node.czxid, in);
+        readBinary(node.mzxid, in);
+        int64_t ctime;
+        readBinary(ctime, in);
+        node.setCtime(ctime);
+        readBinary(node.mtime, in);
+        readBinary(node.version, in);
+        readBinary(node.cversion, in);
+        readBinary(node.aversion, in);
+        int64_t ephemeral_owner = 0;
+        readBinary(ephemeral_owner, in);
+        if (ephemeral_owner != 0)
+            node.setEphemeralOwner(ephemeral_owner);
+
         if (version < SnapshotVersion::V6)
         {
             int32_t data_length = 0;
             readBinary(data_length, in);
         }
-        readBinary(node.stat.numChildren, in);
-        readBinary(node.stat.pzxid, in);
-        readBinary(node.seq_num, in);
+        int32_t num_children = 0;
+        readBinary(num_children, in);
+        if (ephemeral_owner == 0)
+            node.setNumChildren(num_children);
+
+        readBinary(node.pzxid, in);
+
+        int32_t seq_num = 0;
+        readBinary(seq_num, in);
+        if (ephemeral_owner == 0)
+            node.setSeqNum(seq_num);
+
         if (version >= SnapshotVersion::V4 && version <= SnapshotVersion::V5)
         {
@ -238,7 +252,7 @@ void KeeperStorageSnapshot::serialize(const KeeperStorageSnapshot & snapshot, Wr
         /// Benign race condition possible while taking snapshot: NuRaft decide to create snapshot at some log id
         /// and only after some time we lock storage and enable snapshot mode. So snapshot_container_size can be
         /// slightly bigger than required.
-        if (node.stat.mzxid > snapshot.zxid)
+        if (node.mzxid > snapshot.zxid)
             break;

         writeBinary(path, out);
@ -363,11 +377,6 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
     if (recalculate_digest)
         storage.nodes_digest = 0;

-    const auto is_node_empty = [](const auto & node)
-    {
-        return node.getData().empty() && node.stat == KeeperStorage::Node::Stat{};
-    };
-
     for (size_t nodes_read = 0; nodes_read < snapshot_container_size; ++nodes_read)
     {
         std::string path;
@ -395,7 +404,7 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
         }
         else if (match_result == EXACT)
         {
-            if (!is_node_empty(node))
+            if (!node.empty())
             {
                 if (keeper_context->ignoreSystemPathOnStartup() || keeper_context->getServerState() != KeeperContext::Phase::INIT)
                 {
@ -412,8 +421,8 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
         }

         storage.container.insertOrReplace(path, node);
-        if (node.stat.ephemeralOwner != 0)
-            storage.ephemerals[node.stat.ephemeralOwner].insert(path);
+        if (node.isEphemeral())
+            storage.ephemerals[node.ephemeralOwner()].insert(path);

         if (recalculate_digest)
             storage.nodes_digest += node.getDigest(path);
@ -433,16 +442,16 @@ void KeeperStorageSnapshot::deserialize(SnapshotDeserializationResult & deserial
     {
         if (itr.key != "/")
         {
-            if (itr.value.stat.numChildren != static_cast<int32_t>(itr.value.getChildren().size()))
+            if (itr.value.numChildren() != static_cast<int32_t>(itr.value.getChildren().size()))
             {
 #ifdef NDEBUG
                 /// TODO (alesapin) remove this, it should be always CORRUPTED_DATA.
                 LOG_ERROR(getLogger("KeeperSnapshotManager"), "Children counter in stat.numChildren {}"
-                          " is different from actual children size {} for node {}", itr.value.stat.numChildren, itr.value.getChildren().size(), itr.key);
+                          " is different from actual children size {} for node {}", itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
 #else
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "Children counter in stat.numChildren {}"
                                 " is different from actual children size {} for node {}",
-                                itr.value.stat.numChildren, itr.value.getChildren().size(), itr.key);
+                                itr.value.numChildren(), itr.value.getChildren().size(), itr.key);
 #endif
             }
         }
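The serializer and deserializer stay compatible across `SnapshotVersion`s by gating fields on the version — note how the pre-V6 `data_length` field is still read and then discarded so the stream stays aligned even though the current node layout no longer stores it. A stand-alone sketch of that version-gated binary layout (the format and fields below are invented for illustration):

// Stand-alone sketch of version-gated binary (de)serialization: the reader
// consumes fields of older formats even when the current struct dropped them.
#include <cassert>
#include <cstdint>
#include <sstream>

enum class SnapshotVersion : uint8_t { V5 = 5, V6 = 6 };

template <typename T>
void writeBinary(const T & value, std::ostream & out) { out.write(reinterpret_cast<const char *>(&value), sizeof(value)); }
template <typename T>
void readBinary(T & value, std::istream & in) { in.read(reinterpret_cast<char *>(&value), sizeof(value)); }

struct Node { int64_t mzxid = 0; int32_t num_children = 0; };

void serialize(const Node & node, SnapshotVersion version, std::ostream & out)
{
    writeBinary(node.mzxid, out);
    if (version < SnapshotVersion::V6)
        writeBinary(int32_t{0}, out);   // legacy data_length field, dropped in V6
    writeBinary(node.num_children, out);
}

Node deserialize(SnapshotVersion version, std::istream & in)
{
    Node node;
    readBinary(node.mzxid, in);
    if (version < SnapshotVersion::V6)
    {
        int32_t data_length = 0;
        readBinary(data_length, in);    // read and discard: keeps the stream aligned
    }
    readBinary(node.num_children, in);
    return node;
}

int main()
{
    std::stringstream buf;
    serialize(Node{42, 3}, SnapshotVersion::V5, buf);
    Node node = deserialize(SnapshotVersion::V5, buf);
    assert(node.mzxid == 42 && node.num_children == 3);
}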
@ -166,54 +166,132 @@ KeeperStorage::ResponsesForSessions processWatchesImpl(
 }

 // When this function is updated, update CURRENT_DIGEST_VERSION!!
-uint64_t calculateDigest(std::string_view path, std::string_view data, const KeeperStorage::Node::Stat & stat)
+uint64_t calculateDigest(std::string_view path, const KeeperStorage::Node & node)
 {
     SipHash hash;

     hash.update(path);

-    hash.update(data);
+    auto data = node.getData();
+    if (!data.empty())
+    {
+        chassert(data.data() != nullptr);
+        hash.update(data);
+    }

-    hash.update(stat.czxid);
-    hash.update(stat.mzxid);
-    hash.update(stat.ctime);
-    hash.update(stat.mtime);
-    hash.update(stat.version);
-    hash.update(stat.cversion);
-    hash.update(stat.aversion);
-    hash.update(stat.ephemeralOwner);
-    hash.update(stat.numChildren);
-    hash.update(stat.pzxid);
+    hash.update(node.czxid);
+    hash.update(node.mzxid);
+    hash.update(node.ctime());
+    hash.update(node.mtime);
+    hash.update(node.version);
+    hash.update(node.cversion);
+    hash.update(node.aversion);
+    hash.update(node.ephemeralOwner());
+    hash.update(node.numChildren());
+    hash.update(node.pzxid);

-    return hash.get64();
+    auto digest = hash.get64();
+
+    /// 0 means no cached digest
+    if (digest == 0)
+        return 1;
+
+    return digest;
 }

 }

+KeeperStorage::Node & KeeperStorage::Node::operator=(const Node & other)
+{
+    if (this == &other)
+        return *this;
+
+    czxid = other.czxid;
+    mzxid = other.mzxid;
+    pzxid = other.pzxid;
+    acl_id = other.acl_id;
+    mtime = other.mtime;
+    is_ephemeral_and_ctime = other.is_ephemeral_and_ctime;
+    ephemeral_or_children_data = other.ephemeral_or_children_data;
+    data_size = other.data_size;
+    version = other.version;
+    cversion = other.cversion;
+    aversion = other.aversion;
+
+    if (data_size != 0)
+    {
+        data = std::unique_ptr<char[]>(new char[data_size]);
+        memcpy(data.get(), other.data.get(), data_size);
+    }
+
+    children = other.children;
+
+    return *this;
+}
+
+KeeperStorage::Node::Node(const Node & other)
+{
+    *this = other;
+}
+
+bool KeeperStorage::Node::empty() const
+{
+    return data_size == 0 && mzxid == 0;
+}
+
+void KeeperStorage::Node::copyStats(const Coordination::Stat & stat)
+{
+    czxid = stat.czxid;
+    mzxid = stat.mzxid;
+    pzxid = stat.pzxid;
+
+    mtime = stat.mtime;
+    setCtime(stat.ctime);
+
+    version = stat.version;
+    cversion = stat.cversion;
+    aversion = stat.aversion;
+
+    if (stat.ephemeralOwner == 0)
+    {
+        is_ephemeral_and_ctime.is_ephemeral = false;
+        setNumChildren(stat.numChildren);
+    }
+    else
+    {
+        setEphemeralOwner(stat.ephemeralOwner);
+    }
+}
+
 void KeeperStorage::Node::setResponseStat(Coordination::Stat & response_stat) const
 {
-    response_stat.czxid = stat.czxid;
-    response_stat.mzxid = stat.mzxid;
-    response_stat.ctime = stat.ctime;
-    response_stat.mtime = stat.mtime;
-    response_stat.version = stat.version;
-    response_stat.cversion = stat.cversion;
-    response_stat.aversion = stat.aversion;
-    response_stat.ephemeralOwner = stat.ephemeralOwner;
-    response_stat.dataLength = static_cast<int32_t>(data.size());
-    response_stat.numChildren = stat.numChildren;
-    response_stat.pzxid = stat.pzxid;
+    response_stat.czxid = czxid;
+    response_stat.mzxid = mzxid;
+    response_stat.ctime = ctime();
+    response_stat.mtime = mtime;
+    response_stat.version = version;
+    response_stat.cversion = cversion;
+    response_stat.aversion = aversion;
+    response_stat.ephemeralOwner = ephemeralOwner();
+    response_stat.dataLength = static_cast<int32_t>(data_size);
+    response_stat.numChildren = numChildren();
+    response_stat.pzxid = pzxid;

 }

 uint64_t KeeperStorage::Node::sizeInBytes() const
 {
-    return sizeof(Node) + children.size() * sizeof(StringRef) + data.size();
+    return sizeof(Node) + children.size() * sizeof(StringRef) + data_size;
 }

-void KeeperStorage::Node::setData(String new_data)
+void KeeperStorage::Node::setData(const String & new_data)
 {
-    data = std::move(new_data);
+    data_size = static_cast<uint32_t>(new_data.size());
+    if (data_size != 0)
+    {
+        data = std::unique_ptr<char[]>(new char[new_data.size()]);
+        memcpy(data.get(), new_data.data(), data_size);
+    }
 }

 void KeeperStorage::Node::addChild(StringRef child_path)
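Two space optimizations run through this file: the node payload becomes a raw `unique_ptr<char[]>` plus a 32-bit `data_size` instead of a `String` member, and the digest cache drops its separate `has_cached_digest` flag (see the next hunk) by reserving 0 as the "not computed" sentinel — which is why `calculateDigest` above remaps a genuine hash of 0 to 1. A stand-alone sketch of both tricks together, with a placeholder hash function rather than SipHash:

// Stand-alone sketch: compact payload storage plus a zero-sentinel digest cache.
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <string_view>

struct CompactNode
{
    std::unique_ptr<char[]> data;
    uint32_t data_size = 0;
    mutable uint64_t cached_digest = 0;   // 0 <=> cache empty

    void setData(const std::string & new_data)
    {
        data_size = static_cast<uint32_t>(new_data.size());
        if (data_size != 0)
        {
            data = std::unique_ptr<char[]>(new char[data_size]);
            std::memcpy(data.get(), new_data.data(), data_size);
        }
    }

    std::string_view getData() const { return {data.get(), data_size}; }

    void invalidateDigestCache() const { cached_digest = 0; }

    uint64_t getDigest() const
    {
        if (cached_digest == 0)
            cached_digest = calculateDigest();
        return cached_digest;
    }

private:
    uint64_t calculateDigest() const
    {
        uint64_t hash = 1469598103934665603ULL;   // FNV-1a, stand-in for SipHash
        for (uint32_t i = 0; i < data_size; ++i)
            hash = (hash ^ static_cast<unsigned char>(data[i])) * 1099511628211ULL;
        return hash == 0 ? 1 : hash;              // keep 0 reserved for "no cache"
    }
};

int main()
{
    CompactNode node;
    node.setData("hello");
    uint64_t first = node.getDigest();
    return first == node.getDigest() ? 0 : 1;     // second call hits the cache
}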
@ -228,25 +306,41 @@ void KeeperStorage::Node::removeChild(StringRef child_path)

 void KeeperStorage::Node::invalidateDigestCache() const
 {
-    has_cached_digest = false;
+    cached_digest = 0;
 }

 UInt64 KeeperStorage::Node::getDigest(const std::string_view path) const
 {
-    if (!has_cached_digest)
-    {
-        cached_digest = calculateDigest(path, data, stat);
-        has_cached_digest = true;
-    }
+    if (cached_digest == 0)
+        cached_digest = calculateDigest(path, *this);

     return cached_digest;
 };

 void KeeperStorage::Node::shallowCopy(const KeeperStorage::Node & other)
 {
-    stat = other.stat;
-    seq_num = other.seq_num;
-    setData(other.getData());
+    czxid = other.czxid;
+    mzxid = other.mzxid;
+    pzxid = other.pzxid;
+    acl_id = other.acl_id; /// 0 -- no ACL by default
+
+    mtime = other.mtime;
+
+    is_ephemeral_and_ctime = other.is_ephemeral_and_ctime;
+
+    ephemeral_or_children_data = other.ephemeral_or_children_data;
+
+    data_size = other.data_size;
+    if (data_size != 0)
+    {
+        data = std::unique_ptr<char[]>(new char[data_size]);
+        memcpy(data.get(), other.data.get(), data_size);
+    }
+
+    version = other.version;
+    cversion = other.cversion;
+    aversion = other.aversion;
+
     cached_digest = other.cached_digest;
 }

@ -278,13 +372,13 @@ void KeeperStorage::initializeSystemNodes()

     // update root and the digest based on it
     auto current_root_it = container.find("/");
-    assert(current_root_it != container.end());
+    chassert(current_root_it != container.end());
     removeDigest(current_root_it->value, "/");
     auto updated_root_it = container.updateValue(
         "/",
-        [](auto & node)
+        [](KeeperStorage::Node & node)
         {
-            ++node.stat.numChildren;
+            node.increaseNumChildren();
             node.addChild(getBaseNodeName(keeper_system_path));
         }
     );
@ -294,7 +388,7 @@ void KeeperStorage::initializeSystemNodes()
     // insert child system nodes
     for (const auto & [path, data] : keeper_context->getSystemNodesWithData())
     {
-        assert(path.starts_with(keeper_system_path));
+        chassert(path.starts_with(keeper_system_path));
         Node child_system_node;
         child_system_node.setData(data);
         auto [map_key, _] = container.insert(std::string{path}, child_system_node);
@ -339,7 +433,7 @@ std::shared_ptr<KeeperStorage::Node> KeeperStorage::UncommittedState::tryGetNode

 void KeeperStorage::UncommittedState::applyDelta(const Delta & delta)
 {
-    assert(!delta.path.empty());
+    chassert(!delta.path.empty());
     if (!nodes.contains(delta.path))
     {
         if (auto storage_node = tryGetNodeFromStorage(delta.path))
@ -355,22 +449,22 @@ void KeeperStorage::UncommittedState::applyDelta(const Delta & delta)

             if constexpr (std::same_as<DeltaType, CreateNodeDelta>)
             {
-                assert(!node);
+                chassert(!node);
                 node = std::make_shared<Node>();
-                node->stat = operation.stat;
+                node->copyStats(operation.stat);
                 node->setData(operation.data);
                 acls = operation.acls;
                 last_applied_zxid = delta.zxid;
             }
             else if constexpr (std::same_as<DeltaType, RemoveNodeDelta>)
             {
-                assert(node);
+                chassert(node);
                 node = nullptr;
                 last_applied_zxid = delta.zxid;
             }
             else if constexpr (std::same_as<DeltaType, UpdateNodeDelta>)
             {
-                assert(node);
+                chassert(node);
                 node->invalidateDigestCache();
                 operation.update_fn(*node);
                 last_applied_zxid = delta.zxid;
@@ -384,6 +478,40 @@ void KeeperStorage::UncommittedState::applyDelta(const Delta & delta)

 delta.operation);
 }

+bool KeeperStorage::UncommittedState::hasACL(int64_t session_id, bool is_local, std::function<bool(const AuthID &)> predicate) const
+{
+const auto check_auth = [&](const auto & auth_ids)
+{
+for (const auto & auth : auth_ids)
+{
+using TAuth = std::remove_reference_t<decltype(auth)>;
+
+const AuthID * auth_ptr = nullptr;
+if constexpr (std::is_pointer_v<TAuth>)
+auth_ptr = auth;
+else
+auth_ptr = &auth;
+
+if (predicate(*auth_ptr))
+return true;
+}
+return false;
+};
+
+if (is_local)
+return check_auth(storage.session_and_auth[session_id]);
+
+if (check_auth(storage.session_and_auth[session_id]))
+return true;
+
+// check if there are uncommitted
+const auto auth_it = session_and_auth.find(session_id);
+if (auth_it == session_and_auth.end())
+return false;
+
+return check_auth(auth_it->second);
+}
+
 void KeeperStorage::UncommittedState::addDelta(Delta new_delta)
 {
 const auto & added_delta = deltas.emplace_back(std::move(new_delta));
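The `check_auth` lambda moved into this translation unit handles two differently shaped containers: committed auth data stored by value and uncommitted auth data stored as pointers (see the `&add_auth->auth_id` bookkeeping in `commit` below). A standalone sketch of that `if constexpr` dispatch, with illustrative container and type names:

```cpp
#include <cassert>
#include <type_traits>
#include <vector>

struct AuthID { int scheme; };

// One generic helper that walks either a container of AuthID values or a
// container of AuthID pointers, normalizing both to a pointer. Container
// types here are illustrative, not the ones KeeperStorage actually uses.
template <typename Container, typename Predicate>
bool anyAuthMatches(const Container & auth_ids, Predicate predicate)
{
    for (const auto & auth : auth_ids)
    {
        using TAuth = std::remove_reference_t<decltype(auth)>;

        const AuthID * auth_ptr = nullptr;
        if constexpr (std::is_pointer_v<TAuth>)
            auth_ptr = auth;      // container already stores pointers
        else
            auth_ptr = &auth;     // container stores values; take the address

        if (predicate(*auth_ptr))
            return true;
    }
    return false;
}

int main()
{
    std::vector<AuthID> committed{{1}, {2}};
    std::vector<const AuthID *> uncommitted{&committed[0]};
    auto is_two = [](const AuthID & a) { return a.scheme == 2; };
    assert(anyAuthMatches(committed, is_two));
    assert(!anyAuthMatches(uncommitted, is_two));
}
```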
@@ -408,7 +536,7 @@ void KeeperStorage::UncommittedState::addDeltas(std::vector<Delta> new_deltas)

 void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)
 {
-assert(deltas.empty() || deltas.front().zxid >= commit_zxid);
+chassert(deltas.empty() || deltas.front().zxid >= commit_zxid);

 // collect nodes that have no further modification in the current transaction
 std::unordered_set<std::string> modified_nodes;

@@ -426,7 +554,7 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)

 if (!front_delta.path.empty())
 {
 auto & path_deltas = deltas_for_path.at(front_delta.path);
-assert(path_deltas.front() == &front_delta);
+chassert(path_deltas.front() == &front_delta);
 path_deltas.pop_front();
 if (path_deltas.empty())
 {

@@ -444,7 +572,7 @@ void KeeperStorage::UncommittedState::commit(int64_t commit_zxid)

 else if (auto * add_auth = std::get_if<AddAuthDelta>(&front_delta.operation))
 {
 auto & uncommitted_auth = session_and_auth[add_auth->session_id];
-assert(!uncommitted_auth.empty() && uncommitted_auth.front() == &add_auth->auth_id);
+chassert(!uncommitted_auth.empty() && uncommitted_auth.front() == &add_auth->auth_id);
 uncommitted_auth.pop_front();
 if (uncommitted_auth.empty())
 session_and_auth.erase(add_auth->session_id);

@@ -484,7 +612,7 @@ void KeeperStorage::UncommittedState::rollback(int64_t rollback_zxid)

 if (delta_it->zxid < rollback_zxid)
 break;

-assert(delta_it->zxid == rollback_zxid);
+chassert(delta_it->zxid == rollback_zxid);
 if (!delta_it->path.empty())
 {
 std::visit(

@@ -671,7 +799,7 @@ Coordination::Error KeeperStorage::commit(int64_t commit_zxid)

 if (node_it == container.end())
 onStorageInconsistency();

-if (operation.version != -1 && operation.version != node_it->value.stat.version)
+if (operation.version != -1 && operation.version != node_it->value.version)
 onStorageInconsistency();

 removeDigest(node_it->value, path);

@@ -693,7 +821,7 @@ Coordination::Error KeeperStorage::commit(int64_t commit_zxid)

 if (node_it == container.end())
 onStorageInconsistency();

-if (operation.version != -1 && operation.version != node_it->value.stat.aversion)
+if (operation.version != -1 && operation.version != node_it->value.aversion)
 onStorageInconsistency();

 acl_map.removeUsage(node_it->value.acl_id);

@@ -738,7 +866,7 @@ Coordination::Error KeeperStorage::commit(int64_t commit_zxid)

 bool KeeperStorage::createNode(
 const std::string & path,
 String data,
-const KeeperStorage::Node::Stat & stat,
+const Coordination::Stat & stat,
 Coordination::ACLs node_acls)
 {
 auto parent_path = parentNodePath(path);

@@ -747,7 +875,7 @@ bool KeeperStorage::createNode(

 if (node_it == container.end())
 return false;

-if (node_it->value.stat.ephemeralOwner != 0)
+if (node_it->value.isEphemeral())
 return false;

 if (container.contains(path))

@@ -759,8 +887,8 @@ bool KeeperStorage::createNode(

 acl_map.addUsage(acl_id);

 created_node.acl_id = acl_id;
-created_node.stat = stat;
-created_node.setData(std::move(data));
+created_node.copyStats(stat);
+created_node.setData(data);
 auto [map_key, _] = container.insert(path, created_node);
 /// Take child path from key owned by map.
 auto child_path = getBaseNodeName(map_key->getKey());

@@ -769,7 +897,7 @@ bool KeeperStorage::createNode(

 [child_path](KeeperStorage::Node & parent)
 {
 parent.addChild(child_path);
-chassert(parent.stat.numChildren == static_cast<int32_t>(parent.getChildren().size()));
+chassert(parent.numChildren() == static_cast<int32_t>(parent.getChildren().size()));
 }
 );

@@ -783,21 +911,22 @@ bool KeeperStorage::removeNode(const std::string & path, int32_t version)

 if (node_it == container.end())
 return false;

-if (version != -1 && version != node_it->value.stat.version)
+if (version != -1 && version != node_it->value.version)
 return false;

-if (node_it->value.stat.numChildren)
+if (node_it->value.numChildren())
 return false;

-auto prev_node = node_it->value;
-acl_map.removeUsage(prev_node.acl_id);
+KeeperStorage::Node prev_node;
+prev_node.shallowCopy(node_it->value);
+acl_map.removeUsage(node_it->value.acl_id);

 container.updateValue(
 parentNodePath(path),
 [child_basename = getBaseNodeName(node_it->key)](KeeperStorage::Node & parent)
 {
 parent.removeChild(child_basename);
-chassert(parent.stat.numChildren == static_cast<int32_t>(parent.getChildren().size()));
+chassert(parent.numChildren() == static_cast<int32_t>(parent.getChildren().size()));
 }
 );
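`removeNode` previously deep-copied the whole node (`auto prev_node = node_it->value;`), children set included; the new code takes a `shallowCopy` instead. A rough sketch of the pattern, with hypothetical fields since this diff does not show `shallowCopy`'s body:

```cpp
#include <cstdint>
#include <string>
#include <unordered_set>

// Illustrative node with an expensive-to-copy children set.
struct NodeSketch
{
    uint64_t acl_id = 0;
    int32_t version = 0;
    std::string data;
    std::unordered_set<std::string> children;

    // Sketch of a shallow copy: duplicate the cheap scalar state and payload
    // the caller actually needs, but skip the children set it never reads.
    void shallowCopy(const NodeSketch & other)
    {
        acl_id = other.acl_id;
        version = other.version;
        data = other.data;
        // children intentionally not copied
    }
};

int main()
{
    NodeSketch original;
    original.data = "payload";
    original.children = {"a", "b"};

    NodeSketch copy;
    copy.shallowCopy(original);
    // copy.children stays empty; only the scalar state and data were duplicated
}
```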
@@ -957,7 +1086,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr

 if (parent_node == nullptr)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNONODE}};

-else if (parent_node->stat.ephemeralOwner != 0)
+else if (parent_node->isEphemeral())
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNOCHILDRENFOREPHEMERALS}};

 std::string path_created = request.path;

@@ -966,7 +1095,7 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr

 if (request.not_exists)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADARGUMENTS}};

-auto seq_num = parent_node->seq_num;
+auto seq_num = parent_node->seqNum();

 std::stringstream seq_num_str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
 seq_num_str.exceptions(std::ios::failbit);

@@ -1006,20 +1135,20 @@ struct KeeperStorageCreateRequestProcessor final : public KeeperStorageRequestPr

 auto parent_update = [parent_cversion, zxid](KeeperStorage::Node & node)
 {
 /// Increment sequential number even if node is not sequential
-++node.seq_num;
+node.increaseSeqNum();
 if (parent_cversion == -1)
-++node.stat.cversion;
-else if (parent_cversion > node.stat.cversion)
-node.stat.cversion = parent_cversion;
+++node.cversion;
+else if (parent_cversion > node.cversion)
+node.cversion = parent_cversion;

-if (zxid > node.stat.pzxid)
-node.stat.pzxid = zxid;
-++node.stat.numChildren;
+if (zxid > node.pzxid)
+node.pzxid = zxid;
+node.increaseNumChildren();
 };

 new_deltas.emplace_back(std::string{parent_path}, zxid, KeeperStorage::UpdateNodeDelta{std::move(parent_update)});

-KeeperStorage::Node::Stat stat;
+Coordination::Stat stat;
 stat.czxid = zxid;
 stat.mzxid = zxid;
 stat.pzxid = zxid;

@@ -1133,7 +1262,8 @@ struct KeeperStorageGetRequestProcessor final : public KeeperStorageRequestProce

 else
 {
 node_it->value.setResponseStat(response.stat);
-response.data = node_it->value.getData();
+auto data = node_it->value.getData();
+response.data = std::string(data);
 response.error = Coordination::Error::ZOK;
 }
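The extra `std::string(data)` copy above follows from `getData()` now returning a `std::string_view` over a raw buffer instead of a `const String &`. A compilable sketch of that storage scheme, mirroring the `data`/`data_size` members introduced later in this diff (details such as size bookkeeping are simplified):

```cpp
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <string_view>

// A raw buffer plus an explicit length instead of std::string, which avoids
// std::string's capacity/SSO bookkeeping per node.
struct NodeDataSketch
{
    std::unique_ptr<char[]> data;
    uint32_t data_size = 0;

    void setData(const std::string & new_data)
    {
        data_size = static_cast<uint32_t>(new_data.size());
        data = std::make_unique<char[]>(data_size);
        std::memcpy(data.get(), new_data.data(), data_size);
    }

    // Returning string_view makes every copy explicit at the call site.
    std::string_view getData() const noexcept { return {data.get(), data_size}; }
};

int main()
{
    NodeDataSketch node;
    node.setData("hello");
    std::string response_data{node.getData()}; // explicit copy, as in the diff
}
```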
@@ -1190,8 +1320,8 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr

 {
 [zxid](KeeperStorage::Node & parent)
 {
-if (parent.stat.pzxid < zxid)
-parent.stat.pzxid = zxid;
+if (parent.pzxid < zxid)
+parent.pzxid = zxid;
 }
 }
 );

@@ -1205,9 +1335,9 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr

 update_parent_pzxid();
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNONODE}};
 }
-else if (request.version != -1 && request.version != node->stat.version)
+else if (request.version != -1 && request.version != node->version)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADVERSION}};
-else if (node->stat.numChildren != 0)
+else if (node->numChildren() != 0)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNOTEMPTY}};

 if (request.restored_from_zookeeper_log)

@@ -1218,14 +1348,14 @@ struct KeeperStorageRemoveRequestProcessor final : public KeeperStorageRequestPr

 zxid,
 KeeperStorage::UpdateNodeDelta{[](KeeperStorage::Node & parent)
 {
-++parent.stat.cversion;
---parent.stat.numChildren;
+++parent.cversion;
+parent.decreaseNumChildren();
 }});

-new_deltas.emplace_back(request.path, zxid, KeeperStorage::RemoveNodeDelta{request.version, node->stat.ephemeralOwner});
+new_deltas.emplace_back(request.path, zxid, KeeperStorage::RemoveNodeDelta{request.version, node->ephemeralOwner()});

-if (node->stat.ephemeralOwner != 0)
-storage.unregisterEphemeralPath(node->stat.ephemeralOwner, request.path);
+if (node->isEphemeral())
+storage.unregisterEphemeralPath(node->ephemeralOwner(), request.path);

 digest = storage.calculateNodesDigest(digest, new_deltas);

@@ -1339,7 +1469,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce

 auto node = storage.uncommitted_state.getNode(request.path);

-if (request.version != -1 && request.version != node->stat.version)
+if (request.version != -1 && request.version != node->version)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADVERSION}};

 new_deltas.emplace_back(

@@ -1348,9 +1478,9 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce

 KeeperStorage::UpdateNodeDelta{
 [zxid, data = request.data, time](KeeperStorage::Node & value)
 {
-value.stat.version++;
-value.stat.mzxid = zxid;
-value.stat.mtime = time;
+value.version++;
+value.mzxid = zxid;
+value.mtime = time;
 value.setData(data);
 },
 request.version});

@@ -1362,7 +1492,7 @@ struct KeeperStorageSetRequestProcessor final : public KeeperStorageRequestProce

 {
 [](KeeperStorage::Node & parent)
 {
-parent.stat.cversion++;
+parent.cversion++;
 }
 }
 );

@@ -1464,9 +1594,7 @@ struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProc

 auto list_request_type = ALL;
 if (auto * filtered_list = dynamic_cast<Coordination::ZooKeeperFilteredListRequest *>(&request))
-{
 list_request_type = filtered_list->list_request_type;
-}

 if (list_request_type == ALL)
 return true;

@@ -1476,7 +1604,7 @@ struct KeeperStorageListRequestProcessor final : public KeeperStorageRequestProc

 if (child_it == container.end())
 onStorageInconsistency();

-const auto is_ephemeral = child_it->value.stat.ephemeralOwner != 0;
+const auto is_ephemeral = child_it->value.isEphemeral();
 return (is_ephemeral && list_request_type == EPHEMERAL_ONLY) || (!is_ephemeral && list_request_type == PERSISTENT_ONLY);
 };

@@ -1529,7 +1657,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro

 auto node = storage.uncommitted_state.getNode(request.path);
 if (check_not_exists)
 {
-if (node && (request.version == -1 || request.version == node->stat.version))
+if (node && (request.version == -1 || request.version == node->version))
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNODEEXISTS}};
 }
 else

@@ -1537,7 +1665,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro

 if (!node)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZNONODE}};

-if (request.version != -1 && request.version != node->stat.version)
+if (request.version != -1 && request.version != node->version)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADVERSION}};
 }

@@ -1573,7 +1701,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro

 if (check_not_exists)
 {
-if (node_it != container.end() && (request.version == -1 || request.version == node_it->value.stat.version))
+if (node_it != container.end() && (request.version == -1 || request.version == node_it->value.version))
 on_error(Coordination::Error::ZNODEEXISTS);
 else
 response.error = Coordination::Error::ZOK;

@@ -1582,7 +1710,7 @@ struct KeeperStorageCheckRequestProcessor final : public KeeperStorageRequestPro

 {
 if (node_it == container.end())
 on_error(Coordination::Error::ZNONODE);
-else if (request.version != -1 && request.version != node_it->value.stat.version)
+else if (request.version != -1 && request.version != node_it->value.version)
 on_error(Coordination::Error::ZBADVERSION);
 else
 response.error = Coordination::Error::ZOK;

@@ -1635,7 +1763,7 @@ struct KeeperStorageSetACLRequestProcessor final : public KeeperStorageRequestPr

 auto node = uncommitted_state.getNode(request.path);

-if (request.version != -1 && request.version != node->stat.aversion)
+if (request.version != -1 && request.version != node->aversion)
 return {KeeperStorage::Delta{zxid, Coordination::Error::ZBADVERSION}};

@@ -1655,7 +1783,7 @@ struct KeeperStorageSetACLRequestProcessor final : public KeeperStorageRequestPr

 zxid,
 KeeperStorage::UpdateNodeDelta
 {
-[](KeeperStorage::Node & n) { ++n.stat.aversion; }
+[](KeeperStorage::Node & n) { ++n.aversion; }
 }
 }
 };

@@ -1824,7 +1952,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro

 }
 }

-assert(request.requests.empty() || operation_type.has_value());
+chassert(request.requests.empty() || operation_type.has_value());
 }

 std::vector<KeeperStorage::Delta>

@@ -1873,7 +2001,7 @@ struct KeeperStorageMultiRequestProcessor final : public KeeperStorageRequestPro

 auto & deltas = storage.uncommitted_state.deltas;
 // the deltas will have at least SubDeltaEnd or FailedMultiDelta
-assert(!deltas.empty());
+chassert(!deltas.empty());
 if (auto * failed_multi = std::get_if<KeeperStorage::FailedMultiDelta>(&deltas.front().operation))
 {
 for (size_t i = 0; i < concrete_requests.size(); ++i)

@@ -2073,7 +2201,7 @@ UInt64 KeeperStorage::calculateNodesDigest(UInt64 current_digest, const std::vec

 [&](const CreateNodeDelta & create_delta)
 {
 auto node = std::make_shared<Node>();
-node->stat = create_delta.stat;
+node->copyStats(create_delta.stat);
 node->setData(create_delta.data);
 updated_nodes.emplace(delta.path, node);
 },

@@ -2196,8 +2324,8 @@ void KeeperStorage::preprocessRequest(

 {
 [ephemeral_path](Node & parent)
 {
-++parent.stat.cversion;
---parent.stat.numChildren;
+++parent.cversion;
+parent.decreaseNumChildren();
 }
 }
 );

@@ -2300,7 +2428,7 @@ KeeperStorage::ResponsesForSessions KeeperStorage::processRequest(

 if (is_local)
 {
-assert(zk_request->isReadRequest());
+chassert(zk_request->isReadRequest());
 if (check_acl && !request_processor->checkAuth(*this, session_id, true))
 {
 response = zk_request->makeResponse();
@@ -5,17 +5,15 @@

 #include <Coordination/ACLMap.h>
 #include <Coordination/SessionExpiryQueue.h>
 #include <Coordination/SnapshotableHashTable.h>
-#include <IO/WriteBufferFromString.h>
-#include <Common/ConcurrentBoundedQueue.h>
-#include <Common/ZooKeeper/IKeeper.h>
-#include <Common/ZooKeeper/ZooKeeperCommon.h>
-#include <Coordination/KeeperContext.h>

 #include <absl/container/flat_hash_set.h>

 namespace DB
 {

+class KeeperContext;
+using KeeperContextPtr = std::shared_ptr<KeeperContext>;

 struct KeeperStorageRequestProcessor;
 using KeeperStorageRequestProcessorPtr = std::shared_ptr<KeeperStorageRequestProcessor>;
 using ResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr &)>;
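The include-list trim above works because `KeeperContext` is only held through a `shared_ptr`, so a forward declaration suffices in the header. A small sketch of why this compiles:

```cpp
#include <memory>

// A forward declaration is all this header needs: shared_ptr can be declared,
// copied, and destroyed with an incomplete element type, because its deleter
// is type-erased at the point the pointed-to object is created.
class KeeperContext;
using KeeperContextPtr = std::shared_ptr<KeeperContext>;

struct UsesContext
{
    KeeperContextPtr context;   // fine with only the forward declaration
    // KeeperContext inline_member; // would force including the full header
};

int main()
{
    UsesContext u; // holds a null shared_ptr; no complete type required
}
```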
@@ -35,40 +33,113 @@ public:

 /// New fields should be added to the struct only if it's really necessary
 struct Node
 {
-/// to reduce size of the Node struct we use a custom Stat without dataLength
-struct Stat
-{
-int64_t czxid{0};
-int64_t mzxid{0};
-int64_t ctime{0};
-int64_t mtime{0};
-int32_t version{0};
-int32_t cversion{0};
-int32_t aversion{0};
-int32_t numChildren{0}; /// NOLINT
-int64_t ephemeralOwner{0}; /// NOLINT
-int64_t pzxid{0};
-
-bool operator==(const Stat &) const = default;
-};
-
+int64_t czxid{0};
+int64_t mzxid{0};
+int64_t pzxid{0};
 uint64_t acl_id = 0; /// 0 -- no ACL by default
-Stat stat{};
-int32_t seq_num = 0;

-/// we cannot use `std::optional<uint64_t> because we want to
-/// pack the boolean with seq_num above
-mutable bool has_cached_digest = false;
+int64_t mtime{0};
+
+std::unique_ptr<char[]> data{nullptr};
+uint32_t data_size{0};
+
+int32_t version{0};
+int32_t cversion{0};
+int32_t aversion{0};

 mutable uint64_t cached_digest = 0;

+Node() = default;
+
+Node & operator=(const Node & other);
+
+Node(const Node & other);
+
+bool empty() const;
+
+bool isEphemeral() const
+{
+return is_ephemeral_and_ctime.is_ephemeral;
+}
+
+int64_t ephemeralOwner() const
+{
+if (isEphemeral())
+return ephemeral_or_children_data.ephemeral_owner;
+
+return 0;
+}
+
+void setEphemeralOwner(int64_t ephemeral_owner)
+{
+is_ephemeral_and_ctime.is_ephemeral = ephemeral_owner != 0;
+ephemeral_or_children_data.ephemeral_owner = ephemeral_owner;
+}
+
+int32_t numChildren() const
+{
+if (isEphemeral())
+return 0;
+
+return ephemeral_or_children_data.children_info.num_children;
+}
+
+void setNumChildren(int32_t num_children)
+{
+ephemeral_or_children_data.children_info.num_children = num_children;
+}
+
+void increaseNumChildren()
+{
+chassert(!isEphemeral());
+++ephemeral_or_children_data.children_info.num_children;
+}
+
+void decreaseNumChildren()
+{
+chassert(!isEphemeral());
+--ephemeral_or_children_data.children_info.num_children;
+}
+
+int32_t seqNum() const
+{
+if (isEphemeral())
+return 0;
+
+return ephemeral_or_children_data.children_info.seq_num;
+}
+
+void setSeqNum(int32_t seq_num)
+{
+ephemeral_or_children_data.children_info.seq_num = seq_num;
+}
+
+void increaseSeqNum()
+{
+chassert(!isEphemeral());
+++ephemeral_or_children_data.children_info.seq_num;
+}
+
+int64_t ctime() const
+{
+return is_ephemeral_and_ctime.ctime;
+}
+
+void setCtime(uint64_t ctime)
+{
+is_ephemeral_and_ctime.ctime = ctime;
+}
+
+void copyStats(const Coordination::Stat & stat);
+
 void setResponseStat(Coordination::Stat & response_stat) const;

 /// Object memory size
 uint64_t sizeInBytes() const;

-void setData(String new_data);
+void setData(const String & new_data);

-const auto & getData() const noexcept { return data; }
+std::string_view getData() const noexcept { return {data.get(), data_size}; }

 void addChild(StringRef child_path);

@@ -87,19 +158,46 @@ public:

 // (e.g. we don't need to copy list of children)
 void shallowCopy(const Node & other);
 private:
-String data;
+/// as ctime can't be negative because it stores the timestamp when the
+/// node was created, we can use the MSB for a bool
+struct
+{
+bool is_ephemeral : 1;
+int64_t ctime : 63;
+} is_ephemeral_and_ctime{false, 0};
+
+/// ephemeral nodes cannot have children so a node can set either
+/// ephemeral_owner OR seq_num + num_children
+union
+{
+int64_t ephemeral_owner;
+struct
+{
+int32_t seq_num;
+int32_t num_children;
+} children_info;
+} ephemeral_or_children_data{0};
+
 ChildrenSet children{};
 };

+#if !defined(ADDRESS_SANITIZER) && !defined(MEMORY_SANITIZER)
+static_assert(
+sizeof(ListNode<Node>) <= 144,
+"std::list node containing ListNode<Node> is > 160 bytes (sizeof(ListNode<Node>) + 16 bytes for pointers) which will increase "
+"memory consumption");
+#endif
+
 enum DigestVersion : uint8_t
 {
 NO_DIGEST = 0,
 V1 = 1,
 V2 = 2, // added system nodes that modify the digest on startup so digest from V0 is invalid
-V3 = 3 // fixed bug with casting, removed duplicate czxid usage
+V3 = 3, // fixed bug with casting, removed duplicate czxid usage
+V4 = 4 // 0 is not a valid digest value
 };

-static constexpr auto CURRENT_DIGEST_VERSION = DigestVersion::V3;
+static constexpr auto CURRENT_DIGEST_VERSION = DigestVersion::V4;

 struct ResponseForSession
 {
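The private section above is where the memory savings come from: a bit-field steals `ctime`'s unused sign bit for the ephemeral flag, and a union overlays `ephemeral_owner` with `seq_num`/`num_children`, which are never needed for the same node. A self-contained sketch of both tricks (field names follow the diff; the 16-byte figure assumes GCC/Clang bit-field layout):

```cpp
#include <cassert>
#include <cstdint>

struct PackedNodeState
{
    // ctime is a non-negative timestamp, so its sign bit is free:
    // steal the MSB to store the "is ephemeral" flag.
    struct
    {
        bool is_ephemeral : 1;
        int64_t ctime : 63;
    } is_ephemeral_and_ctime{false, 0};

    // An ephemeral node can never have children, so the ephemeral owner and
    // the (seq_num, num_children) pair can share the same 8 bytes.
    union
    {
        int64_t ephemeral_owner;
        struct
        {
            int32_t seq_num;
            int32_t num_children;
        } children_info;
    } ephemeral_or_children_data{0};
};

// Both pieces of state fit in 16 bytes; a naive layout (separate bool,
// int64 ctime, int64 owner, two int32s) needs noticeably more with padding.
static_assert(sizeof(PackedNodeState) == 16, "assumes GCC/Clang bit-field packing");

int main()
{
    PackedNodeState state;
    state.is_ephemeral_and_ctime = {true, 1700000000000};
    state.ephemeral_or_children_data.ephemeral_owner = 42;
    assert(state.is_ephemeral_and_ctime.is_ephemeral);
    assert(state.is_ephemeral_and_ctime.ctime == 1700000000000);
}
```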
@@ -169,7 +267,7 @@ public:

 // - quickly commit the changes to the storage
 struct CreateNodeDelta
 {
-KeeperStorage::Node::Stat stat;
+Coordination::Stat stat;
 Coordination::ACLs acls;
 String data;
 };

@@ -243,39 +341,7 @@ public:

 void applyDelta(const Delta & delta);

-bool hasACL(int64_t session_id, bool is_local, std::function<bool(const AuthID &)> predicate)
-{
-const auto check_auth = [&](const auto & auth_ids)
-{
-for (const auto & auth : auth_ids)
-{
-using TAuth = std::remove_reference_t<decltype(auth)>;
-
-const AuthID * auth_ptr = nullptr;
-if constexpr (std::is_pointer_v<TAuth>)
-auth_ptr = auth;
-else
-auth_ptr = &auth;
-
-if (predicate(*auth_ptr))
-return true;
-}
-return false;
-};
-
-if (is_local)
-return check_auth(storage.session_and_auth[session_id]);
-
-if (check_auth(storage.session_and_auth[session_id]))
-return true;
-
-// check if there are uncommitted
-const auto auth_it = session_and_auth.find(session_id);
-if (auth_it == session_and_auth.end())
-return false;
-
-return check_auth(auth_it->second);
-}
+bool hasACL(int64_t session_id, bool is_local, std::function<bool(const AuthID &)> predicate) const;

 void forEachAuthInSession(int64_t session_id, std::function<void(const AuthID &)> func) const;

@@ -334,7 +400,7 @@ public:

 bool createNode(
 const std::string & path,
 String data,
-const KeeperStorage::Node::Stat & stat,
+const Coordination::Stat & stat,
 Coordination::ACLs node_acls);

 // Remove node in the storage
@@ -2,72 +2,58 @@

 #include <base/StringRef.h>
 #include <Common/HashTable/HashMap.h>
 #include <Common/ArenaUtils.h>
-#include <list>

 namespace DB
 {

-namespace ErrorCodes
-{
-extern const int LOGICAL_ERROR;
-}
-
 template<typename V>
 struct ListNode
 {
 StringRef key;
 V value;

-/// |* * ****** |
-/// ^ ^ ^
-/// active_in_map free_key version
-/// (1 byte) (1 byte) (6 bytes)
-uint64_t node_metadata = 0;
+struct
+{
+bool active_in_map : 1;
+bool free_key : 1;
+uint64_t version : 62;
+} node_metadata{false, false, 0};

 void setInactiveInMap()
 {
-node_metadata &= ~active_in_map_mask;
+node_metadata.active_in_map = false;
 }

 void setActiveInMap()
 {
-node_metadata |= active_in_map_mask;
+node_metadata.active_in_map = true;
 }

 bool isActiveInMap()
 {
-return node_metadata & active_in_map_mask;
+return node_metadata.active_in_map;
 }

 void setFreeKey()
 {
-node_metadata |= free_key_mask;
+node_metadata.free_key = true;
 }

 bool getFreeKey()
 {
-return node_metadata & free_key_mask;
+return node_metadata.free_key;
 }

 uint64_t getVersion()
 {
-return node_metadata & version_mask;
+return node_metadata.version;
 }

 void setVersion(uint64_t version)
 {
-if (version > version_mask)
-throw Exception(
-ErrorCodes::LOGICAL_ERROR, "Snapshot version {} is larger than maximum allowed value {}", version, version_mask);
-
-node_metadata &= ~version_mask;
-node_metadata |= version;
+node_metadata.version = version;
 }

-static constexpr uint64_t active_in_map_mask = static_cast<uint64_t>(1) << 63;
-static constexpr uint64_t free_key_mask = static_cast<uint64_t>(1) << 62;
-static constexpr uint64_t version_mask = ~(static_cast<uint64_t>(3) << 62);
 };

 template <class V>
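The `node_metadata` word keeps the same information, but the hand-maintained shift/mask constants give way to compiler-managed bit-fields. A simplified side-by-side sketch of the two encodings (only two of the three accessors shown):

```cpp
#include <cassert>
#include <cstdint>

// Old style (simplified): one uint64_t plus manual masks.
struct ManualMetadata
{
    uint64_t bits = 0;
    static constexpr uint64_t active_in_map_mask = uint64_t(1) << 63;
    static constexpr uint64_t version_mask = ~(uint64_t(3) << 62);

    void setActiveInMap() { bits |= active_in_map_mask; }
    uint64_t getVersion() const { return bits & version_mask; }
};

// New style: same layout intent, no mask constants to keep in sync.
struct BitFieldMetadata
{
    struct
    {
        bool active_in_map : 1;
        bool free_key : 1;
        uint64_t version : 62;
    } fields{false, false, 0};

    void setActiveInMap() { fields.active_in_map = true; }
    uint64_t getVersion() const { return fields.version; }
};

int main()
{
    BitFieldMetadata m;
    m.setActiveInMap();
    assert(m.getVersion() == 0);
    // Note: a 62-bit bit-field silently truncates on assignment, whereas the
    // old setVersion threw a LOGICAL_ERROR when the value exceeded the mask.
}
```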
@@ -101,30 +101,37 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerP

 KeeperStorage::Node node{};
 String data;
 Coordination::read(data, in);
-node.setData(std::move(data));
+node.setData(data);
 Coordination::read(node.acl_id, in);

 /// Deserialize stat
-Coordination::read(node.stat.czxid, in);
-Coordination::read(node.stat.mzxid, in);
+Coordination::read(node.czxid, in);
+Coordination::read(node.mzxid, in);
 /// For some reason ZXID specified in filename can be smaller
 /// than actual zxid from nodes. In this case we will use zxid from nodes.
-max_zxid = std::max(max_zxid, node.stat.mzxid);
+max_zxid = std::max(max_zxid, node.mzxid);

-Coordination::read(node.stat.ctime, in);
-Coordination::read(node.stat.mtime, in);
-Coordination::read(node.stat.version, in);
-Coordination::read(node.stat.cversion, in);
-Coordination::read(node.stat.aversion, in);
-Coordination::read(node.stat.ephemeralOwner, in);
-Coordination::read(node.stat.pzxid, in);
+int64_t ctime;
+Coordination::read(ctime, in);
+node.setCtime(ctime);
+Coordination::read(node.mtime, in);
+Coordination::read(node.version, in);
+Coordination::read(node.cversion, in);
+Coordination::read(node.aversion, in);
+int64_t ephemeral_owner;
+Coordination::read(ephemeral_owner, in);
+if (ephemeral_owner != 0)
+node.setEphemeralOwner(ephemeral_owner);
+Coordination::read(node.pzxid, in);
 if (!path.empty())
 {
-node.seq_num = node.stat.cversion;
+if (ephemeral_owner == 0)
+node.setSeqNum(node.cversion);
+
 storage.container.insertOrReplace(path, node);

-if (node.stat.ephemeralOwner != 0)
-storage.ephemerals[node.stat.ephemeralOwner].insert(path);
+if (ephemeral_owner != 0)
+storage.ephemerals[ephemeral_owner].insert(path);

 storage.acl_map.addUsage(node.acl_id);
 }

@@ -139,7 +146,7 @@ int64_t deserializeStorageData(KeeperStorage & storage, ReadBuffer & in, LoggerP

 if (itr.key != "/")
 {
 auto parent_path = parentNodePath(itr.key);
-storage.container.updateValue(parent_path, [my_path = itr.key] (KeeperStorage::Node & value) { value.addChild(getBaseNodeName(my_path)); ++value.stat.numChildren; });
+storage.container.updateValue(parent_path, [my_path = itr.key] (KeeperStorage::Node & value) { value.addChild(getBaseNodeName(my_path)); value.increaseNumChildren(); });
 }
 }
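Because `ephemeral_owner` and `seq_num`/`num_children` now overlay each other in the node, the loader above reads the owner into a local variable first and then writes only the union side that is valid for the node. A simplified, self-contained sketch of that ordering (stand-in types; the real code uses `Coordination::read` and the `Node` accessors shown earlier):

```cpp
#include <cstdint>

// Minimal stand-in; the real type is KeeperStorage::Node with a union inside.
struct NodeSketch
{
    int32_t cversion = 0;
    int64_t owner_or_seq = 0;   // stands in for the shared storage
    bool ephemeral = false;

    void setEphemeralOwner(int64_t owner) { ephemeral = true; owner_or_seq = owner; }
    void setSeqNum(int32_t seq) { ephemeral = false; owner_or_seq = seq; }
};

// The loader's rule: decide once which side of the shared storage this node
// uses, and write exactly that side, never both.
NodeSketch loadNode(int64_t ephemeral_owner_from_stream, int32_t cversion_from_stream)
{
    NodeSketch node;
    node.cversion = cversion_from_stream;

    if (ephemeral_owner_from_stream != 0)
        node.setEphemeralOwner(ephemeral_owner_from_stream); // ephemeral side
    else
        node.setSeqNum(node.cversion);                       // children/seq side

    return node;
}

int main()
{
    NodeSketch ephemeral = loadNode(42, 7);
    NodeSketch persistent = loadNode(0, 7);
    (void)ephemeral;
    (void)persistent;
}
```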
@@ -1508,7 +1508,7 @@ void addNode(DB::KeeperStorage & storage, const std::string & path, const std::s

 using Node = DB::KeeperStorage::Node;
 Node node{};
 node.setData(data);
-node.stat.ephemeralOwner = ephemeral_owner;
+node.setEphemeralOwner(ephemeral_owner);
 storage.container.insertOrReplace(path, node);
 auto child_it = storage.container.find(path);
 auto child_path = DB::getBaseNodeName(child_it->key);

@@ -1517,7 +1517,7 @@ void addNode(DB::KeeperStorage & storage, const std::string & path, const std::s

 [&](auto & parent)
 {
 parent.addChild(child_path);
-parent.stat.numChildren++;
+parent.increaseNumChildren();
 });
 }

@@ -1530,12 +1530,12 @@ TEST_P(CoordinationTest, TestStorageSnapshotSimple)

 DB::KeeperSnapshotManager manager(3, keeper_context, params.enable_compression);

 DB::KeeperStorage storage(500, "", keeper_context);
-addNode(storage, "/hello", "world", 1);
-addNode(storage, "/hello/somepath", "somedata", 3);
+addNode(storage, "/hello1", "world", 1);
+addNode(storage, "/hello2", "somedata", 3);
 storage.session_id_counter = 5;
 storage.zxid = 2;
-storage.ephemerals[3] = {"/hello"};
-storage.ephemerals[1] = {"/hello/somepath"};
+storage.ephemerals[3] = {"/hello2"};
+storage.ephemerals[1] = {"/hello1"};
 storage.getSessionID(130);
 storage.getSessionID(130);

@@ -1556,13 +1556,13 @@ TEST_P(CoordinationTest, TestStorageSnapshotSimple)

 auto [restored_storage, snapshot_meta, _] = manager.deserializeSnapshotFromBuffer(debuf);

 EXPECT_EQ(restored_storage->container.size(), 6);
-EXPECT_EQ(restored_storage->container.getValue("/").getChildren().size(), 2);
-EXPECT_EQ(restored_storage->container.getValue("/hello").getChildren().size(), 1);
-EXPECT_EQ(restored_storage->container.getValue("/hello/somepath").getChildren().size(), 0);
+EXPECT_EQ(restored_storage->container.getValue("/").getChildren().size(), 3);
+EXPECT_EQ(restored_storage->container.getValue("/hello1").getChildren().size(), 0);
+EXPECT_EQ(restored_storage->container.getValue("/hello2").getChildren().size(), 0);

 EXPECT_EQ(restored_storage->container.getValue("/").getData(), "");
-EXPECT_EQ(restored_storage->container.getValue("/hello").getData(), "world");
-EXPECT_EQ(restored_storage->container.getValue("/hello/somepath").getData(), "somedata");
+EXPECT_EQ(restored_storage->container.getValue("/hello1").getData(), "world");
+EXPECT_EQ(restored_storage->container.getValue("/hello2").getData(), "somedata");
 EXPECT_EQ(restored_storage->session_id_counter, 7);
 EXPECT_EQ(restored_storage->zxid, 2);
 EXPECT_EQ(restored_storage->ephemerals.size(), 2);

@@ -2251,12 +2251,12 @@ TEST_P(CoordinationTest, TestStorageSnapshotDifferentCompressions)

 DB::KeeperSnapshotManager manager(3, keeper_context, params.enable_compression);

 DB::KeeperStorage storage(500, "", keeper_context);
-addNode(storage, "/hello", "world", 1);
-addNode(storage, "/hello/somepath", "somedata", 3);
+addNode(storage, "/hello1", "world", 1);
+addNode(storage, "/hello2", "somedata", 3);
 storage.session_id_counter = 5;
 storage.zxid = 2;
-storage.ephemerals[3] = {"/hello"};
-storage.ephemerals[1] = {"/hello/somepath"};
+storage.ephemerals[3] = {"/hello2"};
+storage.ephemerals[1] = {"/hello1"};
 storage.getSessionID(130);
 storage.getSessionID(130);

@@ -2273,13 +2273,13 @@ TEST_P(CoordinationTest, TestStorageSnapshotDifferentCompressions)

 auto [restored_storage, snapshot_meta, _] = new_manager.deserializeSnapshotFromBuffer(debuf);

 EXPECT_EQ(restored_storage->container.size(), 6);
-EXPECT_EQ(restored_storage->container.getValue("/").getChildren().size(), 2);
-EXPECT_EQ(restored_storage->container.getValue("/hello").getChildren().size(), 1);
-EXPECT_EQ(restored_storage->container.getValue("/hello/somepath").getChildren().size(), 0);
+EXPECT_EQ(restored_storage->container.getValue("/").getChildren().size(), 3);
+EXPECT_EQ(restored_storage->container.getValue("/hello1").getChildren().size(), 0);
+EXPECT_EQ(restored_storage->container.getValue("/hello2").getChildren().size(), 0);

 EXPECT_EQ(restored_storage->container.getValue("/").getData(), "");
-EXPECT_EQ(restored_storage->container.getValue("/hello").getData(), "world");
-EXPECT_EQ(restored_storage->container.getValue("/hello/somepath").getData(), "somedata");
+EXPECT_EQ(restored_storage->container.getValue("/hello1").getData(), "world");
+EXPECT_EQ(restored_storage->container.getValue("/hello2").getData(), "somedata");
 EXPECT_EQ(restored_storage->session_id_counter, 7);
 EXPECT_EQ(restored_storage->zxid, 2);
 EXPECT_EQ(restored_storage->ephemerals.size(), 2);

@@ -2948,7 +2948,7 @@ TEST_P(CoordinationTest, TestCheckNotExistsRequest)

 create_path("/test_node");
 auto node_it = storage.container.find("/test_node");
 ASSERT_NE(node_it, storage.container.end());
-auto node_version = node_it->value.stat.version;
+auto node_version = node_it->value.version;

 {
 SCOPED_TRACE("CheckNotExists returns ZNODEEXISTS");
@@ -115,6 +115,8 @@ namespace DB

 M(Bool, storage_metadata_write_full_object_key, false, "Write disk metadata files with VERSION_FULL_OBJECT_KEY format", 0) \
 M(UInt64, max_materialized_views_count_for_table, 0, "A limit on the number of materialized views attached to a table.", 0) \
 M(UInt32, max_database_replicated_create_table_thread_pool_size, 1, "The number of threads to create tables during replica recovery in DatabaseReplicated. Zero means number of threads equal number of cores.", 0) \
+M(String, default_replica_path, "/clickhouse/tables/{uuid}/{shard}", "The path to the table in ZooKeeper", 0) \
+M(String, default_replica_name, "{replica}", "The replica name in ZooKeeper", 0) \

 /// If you add a setting which can be updated at runtime, please update 'changeable_settings' map in StorageSystemServerSettings.cpp
@@ -14,6 +14,11 @@ namespace ErrorCodes

 }

 void ServerUUID::load(const fs::path & server_uuid_file, Poco::Logger * log)
+{
+server_uuid = loadServerUUID(server_uuid_file, log);
+}
+
+UUID loadServerUUID(const fs::path & server_uuid_file, Poco::Logger * log)
 {
 /// Write a uuid file containing a unique uuid if the file doesn't already exist during server start.

@@ -25,8 +30,7 @@ void ServerUUID::load(const fs::path & server_uuid_file, Poco::Logger * log)

 ReadBufferFromFile in(server_uuid_file);
 readUUIDText(uuid, in);
 assertEOF(in);
-server_uuid = uuid;
-return;
+return uuid;
 }
 catch (...)
 {

@@ -44,7 +48,7 @@ void ServerUUID::load(const fs::path & server_uuid_file, Poco::Logger * log)

 out.write(uuid_str.data(), uuid_str.size());
 out.sync();
 out.finalize();
-server_uuid = new_uuid;
+return new_uuid;
 }
 catch (...)
 {

@@ -21,4 +21,6 @@ public:

 static void load(const fs::path & server_uuid_file, Poco::Logger * log);
 };

+UUID loadServerUUID(const fs::path & server_uuid_file, Poco::Logger * log);
+
 }
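The `ServerUUID::load` refactor above splits the file I/O into a free `loadServerUUID` that returns the value, leaving the static member as a thin wrapper that other callers can bypass. A minimal sketch of the shape of that refactor, with stub types standing in for the real `UUID` and `Poco::Logger`:

```cpp
#include <filesystem>

namespace fs = std::filesystem;

// Stub stand-ins for the real DB::UUID and Poco::Logger.
struct UUID { unsigned long long high = 0, low = 0; };
struct Logger {};

// Free function mirroring the diff: does the work and returns the value
// instead of writing a class-level global. Body stubbed out here.
UUID loadServerUUID(const fs::path & /*server_uuid_file*/, Logger * /*log*/)
{
    return UUID{};
}

class ServerUUID
{
public:
    // The static member becomes a thin wrapper over the free function.
    static void load(const fs::path & server_uuid_file, Logger * log)
    {
        server_uuid = loadServerUUID(server_uuid_file, log);
    }

private:
    static inline UUID server_uuid;
};

int main()
{
    Logger log;
    ServerUUID::load("server_uuid.txt", &log);
}
```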
|
@ -82,7 +82,8 @@ class IColumn;
|
|||||||
M(UInt64, s3_upload_part_size_multiply_parts_count_threshold, 500, "Each time this number of parts was uploaded to S3, s3_min_upload_part_size is multiplied by s3_upload_part_size_multiply_factor.", 0) \
|
M(UInt64, s3_upload_part_size_multiply_parts_count_threshold, 500, "Each time this number of parts was uploaded to S3, s3_min_upload_part_size is multiplied by s3_upload_part_size_multiply_factor.", 0) \
|
||||||
M(UInt64, s3_max_inflight_parts_for_one_file, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited. You ", 0) \
|
M(UInt64, s3_max_inflight_parts_for_one_file, 20, "The maximum number of a concurrent loaded parts in multipart upload request. 0 means unlimited. You ", 0) \
|
||||||
M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
|
M(UInt64, s3_max_single_part_upload_size, 32*1024*1024, "The maximum size of object to upload using singlepart upload to S3.", 0) \
|
||||||
M(UInt64, azure_max_single_part_upload_size, 100*1024*1024, "The maximum size of object to upload using singlepart upload to Azure blob storage.", 0) \
|
M(UInt64, azure_max_single_part_upload_size, 100*1024*1024, "The maximum size of object to upload using singlepart upload to Azure blob storage.", 0) \
|
||||||
|
M(UInt64, azure_max_single_part_copy_size, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage.", 0) \
|
||||||
M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
|
M(UInt64, s3_max_single_read_retries, 4, "The maximum number of retries during single S3 read.", 0) \
|
||||||
M(UInt64, azure_max_single_read_retries, 4, "The maximum number of retries during single Azure blob storage read.", 0) \
|
M(UInt64, azure_max_single_read_retries, 4, "The maximum number of retries during single Azure blob storage read.", 0) \
|
||||||
M(UInt64, azure_max_unexpected_write_error_retries, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write", 0) \
|
M(UInt64, azure_max_unexpected_write_error_retries, 4, "The maximum number of retries in case of unexpected errors during Azure blob storage write", 0) \
|
||||||
@ -773,7 +774,7 @@ class IColumn;
|
|||||||
M(Bool, load_marks_asynchronously, false, "Load MergeTree marks asynchronously", 0) \
|
M(Bool, load_marks_asynchronously, false, "Load MergeTree marks asynchronously", 0) \
|
||||||
M(Bool, enable_filesystem_read_prefetches_log, false, "Log to system.filesystem prefetch_log during query. Should be used only for testing or debugging, not recommended to be turned on by default", 0) \
|
M(Bool, enable_filesystem_read_prefetches_log, false, "Log to system.filesystem prefetch_log during query. Should be used only for testing or debugging, not recommended to be turned on by default", 0) \
|
||||||
M(Bool, allow_prefetched_read_pool_for_remote_filesystem, true, "Prefer prefetched threadpool if all parts are on remote filesystem", 0) \
|
M(Bool, allow_prefetched_read_pool_for_remote_filesystem, true, "Prefer prefetched threadpool if all parts are on remote filesystem", 0) \
|
||||||
M(Bool, allow_prefetched_read_pool_for_local_filesystem, false, "Prefer prefetched threadpool if all parts are on remote filesystem", 0) \
|
M(Bool, allow_prefetched_read_pool_for_local_filesystem, false, "Prefer prefetched threadpool if all parts are on local filesystem", 0) \
|
||||||
\
|
\
|
||||||
M(UInt64, prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the prefetch buffer to read from the filesystem.", 0) \
|
M(UInt64, prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the prefetch buffer to read from the filesystem.", 0) \
|
||||||
M(UInt64, filesystem_prefetch_step_bytes, 0, "Prefetch step in bytes. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
|
M(UInt64, filesystem_prefetch_step_bytes, 0, "Prefetch step in bytes. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
|
||||||
@@ -857,7 +858,7 @@ class IColumn;
     M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \
     M(Bool, optimize_distinct_in_order, true, "Enable DISTINCT optimization if some columns in DISTINCT form a prefix of sorting. For example, prefix of sorting key in merge tree or ORDER BY statement", 0) \
     M(Bool, keeper_map_strict_mode, false, "Enforce additional checks during operations on KeeperMap. E.g. throw an exception on an insert for already existing key", 0) \
-    M(UInt64, extract_kvp_max_pairs_per_row, 1000, "Max number pairs that can be produced by extractKeyValuePairs function. Used to safeguard against consuming too much memory.", 0) \
+    M(UInt64, extract_key_value_pairs_max_pairs_per_row, 1000, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory.", 0) ALIAS(extract_kvp_max_pairs_per_row) \
     M(Timezone, session_timezone, "", "This setting can be removed in the future due to potential caveats. It is experimental and is not suitable for production usage. The default timezone for current session or query. The server default timezone if empty.", 0) \
     M(Bool, allow_create_index_without_type, false, "Allow CREATE INDEX query without TYPE. Query will be ignored. Made for SQL compatibility tests.", 0) \
     M(Bool, create_index_ignore_unique, false, "Ignore UNIQUE keyword in CREATE UNIQUE INDEX. Made for SQL compatibility tests.", 0) \
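The rename above keeps the old spelling usable through `ALIAS(extract_kvp_max_pairs_per_row)`, so existing queries that set the old name keep working. As a rough, self-contained illustration of the safeguard the setting describes — hypothetical code, not ClickHouse's actual `extractKeyValuePairs` implementation — a parser can cap how many pairs a single row may produce:

    #include <cstddef>
    #include <map>
    #include <stdexcept>
    #include <string>

    /// Hypothetical sketch: stop producing pairs once the configured cap
    /// (what extract_key_value_pairs_max_pairs_per_row controls) is reached.
    std::map<std::string, std::string> extractPairsCapped(const std::string & row, size_t max_pairs)
    {
        std::map<std::string, std::string> result;
        size_t pos = 0;
        while (pos < row.size())
        {
            if (result.size() >= max_pairs)
                throw std::runtime_error("too many key-value pairs in a single row");
            size_t eq = row.find('=', pos);
            if (eq == std::string::npos)
                break;
            size_t end = row.find(',', eq);
            if (end == std::string::npos)
                end = row.size();
            result.emplace(row.substr(pos, eq - pos), row.substr(eq + 1, end - eq - 1));
            pos = end + 1;
        }
        return result;
    }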
@@ -92,7 +92,9 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
             {"async_insert_busy_timeout_increase_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout increases"},
             {"async_insert_busy_timeout_decrease_rate", 0.2, 0.2, "The exponential growth rate at which the adaptive asynchronous insert timeout decreases"},
             {"split_parts_ranges_into_intersecting_and_non_intersecting_final", true, true, "Allow to split parts ranges into intersecting and non intersecting during FINAL optimization"},
-            {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"}}},
+            {"split_intersecting_parts_ranges_into_layers_final", true, true, "Allow to split intersecting parts ranges into layers during FINAL optimization"},
+            {"azure_max_single_part_copy_size", 256*1024*1024, 256*1024*1024, "The maximum size of object to copy using single part copy to Azure blob storage."},
+            {"extract_key_value_pairs_max_pairs_per_row", 0, 0, "Max number of pairs that can be produced by the `extractKeyValuePairs` function. Used as a safeguard against consuming too much memory."}}},
     {"24.1", {{"print_pretty_type_names", false, true, "Better user experience."},
             {"input_format_json_read_bools_as_strings", false, true, "Allow to read bools as strings in JSON formats by default"},
             {"output_format_arrow_use_signed_indexes_for_dictionary", false, true, "Use signed indexes type for Arrow dictionaries by default as it's recommended"},
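For context, this history table maps each release to the settings whose defaults changed in it, recorded as {name, previous default, new default, reason}; the two insertions above extend the current block, and the closing braces move accordingly. A simplified sketch of the shape, with string-typed values and a placeholder version for brevity (the real code keys on ClickHouseVersion and stores Field values):

    #include <map>
    #include <string>
    #include <vector>

    /// Simplified sketch of the settings-changes-history shape.
    struct SettingChange
    {
        std::string name;
        std::string previous_value;
        std::string new_value;
        std::string reason;
    };

    using SettingsChanges = std::vector<SettingChange>;

    static const std::map<std::string, SettingsChanges> history_sketch =
    {
        {"X.Y", {{"extract_key_value_pairs_max_pairs_per_row", "0", "0",
                  "Safeguard against consuming too much memory."}}},
    };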
@@ -20,6 +20,7 @@ namespace ErrorCodes
 {
     extern const int ILLEGAL_COLUMN;
     extern const int TYPE_MISMATCH;
+    extern const int LOGICAL_ERROR;
 }

 DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type)
@@ -55,62 +56,61 @@ DataTypePtr recursiveRemoveLowCardinality(const DataTypePtr & type)

 ColumnPtr recursiveRemoveLowCardinality(const ColumnPtr & column)
 {
-    if (!column)
-        return column;
+    ColumnPtr res = column;

     if (const auto * column_array = typeid_cast<const ColumnArray *>(column.get()))
     {
         const auto & data = column_array->getDataPtr();
         auto data_no_lc = recursiveRemoveLowCardinality(data);
-        if (data.get() == data_no_lc.get())
-            return column;
-
-        return ColumnArray::create(data_no_lc, column_array->getOffsetsPtr());
+        if (data.get() != data_no_lc.get())
+            res = ColumnArray::create(data_no_lc, column_array->getOffsetsPtr());
     }
-
-    if (const auto * column_const = typeid_cast<const ColumnConst *>(column.get()))
+    else if (const auto * column_const = typeid_cast<const ColumnConst *>(column.get()))
     {
         const auto & nested = column_const->getDataColumnPtr();
         auto nested_no_lc = recursiveRemoveLowCardinality(nested);
-        if (nested.get() == nested_no_lc.get())
-            return column;
-
-        return ColumnConst::create(nested_no_lc, column_const->size());
+        if (nested.get() != nested_no_lc.get())
+            res = ColumnConst::create(nested_no_lc, column_const->size());
     }
-
-    if (const auto * column_tuple = typeid_cast<const ColumnTuple *>(column.get()))
+    else if (const auto * column_tuple = typeid_cast<const ColumnTuple *>(column.get()))
     {
         auto columns = column_tuple->getColumns();
         for (auto & element : columns)
             element = recursiveRemoveLowCardinality(element);
-        return ColumnTuple::create(columns);
+        res = ColumnTuple::create(columns);
     }
-
-    if (const auto * column_map = typeid_cast<const ColumnMap *>(column.get()))
+    else if (const auto * column_map = typeid_cast<const ColumnMap *>(column.get()))
     {
         const auto & nested = column_map->getNestedColumnPtr();
         auto nested_no_lc = recursiveRemoveLowCardinality(nested);
-        if (nested.get() == nested_no_lc.get())
-            return column;
-
-        return ColumnMap::create(nested_no_lc);
+        if (nested.get() != nested_no_lc.get())
+            res = ColumnMap::create(nested_no_lc);
     }
-
     /// Special case when column is a lazy argument of short circuit function.
     /// We should call recursiveRemoveLowCardinality on the result column
     /// when function will be executed.
-    if (const auto * column_function = typeid_cast<const ColumnFunction *>(column.get()))
+    else if (const auto * column_function = typeid_cast<const ColumnFunction *>(column.get()))
     {
-        if (!column_function->isShortCircuitArgument())
-            return column;
-
-        return column_function->recursivelyConvertResultToFullColumnIfLowCardinality();
+        if (column_function->isShortCircuitArgument())
+            res = column_function->recursivelyConvertResultToFullColumnIfLowCardinality();
+    }
+    else if (const auto * column_low_cardinality = typeid_cast<const ColumnLowCardinality *>(column.get()))
+    {
+        res = column_low_cardinality->convertToFullColumn();
     }

-    if (const auto * column_low_cardinality = typeid_cast<const ColumnLowCardinality *>(column.get()))
-        return column_low_cardinality->convertToFullColumn();
-
-    return column;
+    if (res != column)
+    {
+        /// recursiveRemoveLowCardinality() must not change the size of a passed column!
+        if (res->size() != column->size())
+        {
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "recursiveRemoveLowCardinality() somehow changed the size of column {}. Old size={}, new size={}. It's a bug",
+                column->getName(), column->size(), res->size());
+        }
+    }
+
+    return res;
 }

 ColumnPtr recursiveLowCardinalityTypeConversion(const ColumnPtr & column, const DataTypePtr & from_type, const DataTypePtr & to_type)
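The rewrite above trades per-branch early returns for a single `res` accumulator, which lets one post-condition guard every branch at once: the transformed column must keep the input's row count, otherwise a LOGICAL_ERROR is thrown. A distilled sketch of the pattern with simplified types (not the ClickHouse column classes):

    #include <cstddef>
    #include <memory>
    #include <stdexcept>

    struct Node
    {
        virtual ~Node() = default;
        virtual size_t size() const = 0;
    };
    using NodePtr = std::shared_ptr<const Node>;

    /// Accumulate the (possibly) rewritten value in `res`, then validate the
    /// size invariant once before returning, instead of returning early from
    /// each branch.
    NodePtr transform(const NodePtr & node)
    {
        NodePtr res = node;

        /// ... each branch may replace `res` with a rewritten node ...

        if (res != node && res->size() != node->size())
            throw std::logic_error("transform() must not change the size of its input");

        return res;
    }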
@@ -3,9 +3,7 @@
 #include <memory>
 #include <base/chrono_io.h>

-#include <Core/Defines.h>
 #include <Common/CurrentMetrics.h>
-#include <Common/HashTable/Hash.h>
 #include <Common/HashTable/HashSet.h>
 #include <Common/ProfileEvents.h>
 #include <Common/ProfilingScopedRWLock.h>
@@ -4,9 +4,9 @@
 #include <Disks/DiskFactory.h>
 #include <IO/FileEncryptionCommon.h>
 #include <IO/ReadBufferFromEncryptedFile.h>
+#include <IO/ReadBufferFromFileDecorator.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/WriteBufferFromEncryptedFile.h>
-#include <IO/ReadBufferFromEmptyFile.h>
 #include <boost/algorithm/hex.hpp>
 #include <Common/quoteString.h>
 #include <Common/typeid_cast.h>
@@ -374,7 +374,7 @@ std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
     {
         /// File is empty, that's a normal case, see DiskEncrypted::truncateFile().
         /// There is no header so we just return `ReadBufferFromString("")`.
-        return std::make_unique<ReadBufferFromEmptyFile>(wrapped_path);
+        return std::make_unique<ReadBufferFromFileDecorator>(std::make_unique<ReadBufferFromString>(std::string_view{}), wrapped_path);
     }
     auto encryption_settings = current_settings.get();
     FileEncryption::Header header = readHeader(*buffer);
@@ -6,6 +6,7 @@
 #include <Common/Exception.h>
 #include <boost/algorithm/hex.hpp>
 #include <IO/ReadBufferFromEncryptedFile.h>
+#include <IO/ReadBufferFromFileDecorator.h>
 #include <IO/ReadBufferFromString.h>
 #include <IO/WriteBufferFromEncryptedFile.h>
 #include <Common/quoteString.h>
@@ -1,6 +1,6 @@
 #include "ReadBufferFromRemoteFSGather.h"

-#include <IO/ReadBufferFromFileBase.h>
+#include <IO/SeekableReadBuffer.h>

 #include <Disks/IO/CachedOnDiskReadBufferFromFile.h>
 #include <Disks/ObjectStorages/Cached/CachedObjectStorage.h>
@@ -62,7 +62,7 @@ ReadBufferFromRemoteFSGather::ReadBufferFromRemoteFSGather(
     current_object = blobs_to_read.front();
 }

-std::unique_ptr<ReadBufferFromFileBase> ReadBufferFromRemoteFSGather::createImplementationBuffer(const StoredObject & object)
+SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(const StoredObject & object)
 {
     if (current_buf && !with_cache)
     {
@@ -79,7 +79,7 @@ std::unique_ptr<ReadBufferFromFileBase> ReadBufferFromRemoteFSGather::createImpl
     if (with_cache)
     {
         auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path);
-        return std::make_unique<CachedOnDiskReadBufferFromFile>(
+        return std::make_shared<CachedOnDiskReadBufferFromFile>(
             object_path,
             cache_key,
             settings.remote_fs_cache,
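Note the return type change from `std::unique_ptr<ReadBufferFromFileBase>` to the shared `SeekableReadBufferPtr`: with shared ownership, the gather reader and other components (such as a cache layer) can keep the same buffer alive together. A minimal sketch of the difference, using stand-in types rather than the real ClickHouse classes:

    #include <memory>

    struct SeekableReadBufferStub { virtual ~SeekableReadBufferStub() = default; };
    using SeekableReadBufferStubPtr = std::shared_ptr<SeekableReadBufferStub>;

    /// With unique_ptr the caller would be the sole owner; with shared_ptr a
    /// second component can hold the buffer alive as well.
    SeekableReadBufferStubPtr createBufferSketch()
    {
        auto buf = std::make_shared<SeekableReadBufferStub>();
        SeekableReadBufferStubPtr second_owner = buf;  /// now legal; buf lives while any owner does
        return buf;
    }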
@@ -53,7 +53,7 @@ public:
     bool isContentCached(size_t offset, size_t size) override;

 private:
-    std::unique_ptr<ReadBufferFromFileBase> createImplementationBuffer(const StoredObject & object);
+    SeekableReadBufferPtr createImplementationBuffer(const StoredObject & object);

     bool nextImpl() override;

@@ -80,7 +80,7 @@ private:

     StoredObject current_object;
     size_t current_buf_idx = 0;
-    std::unique_ptr<ReadBufferFromFileBase> current_buf;
+    SeekableReadBufferPtr current_buf;

     LoggerPtr log;
 };
@@ -39,7 +39,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     size_t alignment)
 {
     if (file_size.has_value() && !*file_size)
-        return std::make_unique<ReadBufferFromEmptyFile>(filename);
+        return std::make_unique<ReadBufferFromEmptyFile>();

     size_t estimated_size = 0;
     if (read_hint.has_value())
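The change above drops the filename argument from the empty-file shortcut. The shortcut itself is worth spelling out: when the caller already knows the file size is zero, nothing needs to be opened at all. A minimal sketch of the check, with stand-in types rather than the real factory:

    #include <cstddef>
    #include <memory>
    #include <optional>

    struct BufStub { virtual ~BufStub() = default; };
    struct EmptyBufStub : BufStub {};
    struct FileBufStub : BufStub { /* the real factory would open the file here */ };

    /// A known zero size short-circuits to a trivial reader.
    std::unique_ptr<BufStub> makeReader(std::optional<size_t> file_size)
    {
        if (file_size.has_value() && *file_size == 0)
            return std::make_unique<EmptyBufStub>();
        return std::make_unique<FileBufStub>();
    }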
@@ -166,6 +166,9 @@ std::unique_ptr<AzureObjectStorageSettings> getAzureBlobStorageSettings(const Po
         config.getInt(config_prefix + ".max_single_read_retries", 3),
         config.getInt(config_prefix + ".max_single_download_retries", 3),
         config.getInt(config_prefix + ".list_object_keys_size", 1000),
+        config.getUInt64(config_prefix + ".max_upload_part_size", 5ULL * 1024 * 1024 * 1024),
+        config.getUInt64(config_prefix + ".max_single_part_copy_size", context->getSettings().azure_max_single_part_copy_size),
+        config.getBool(config_prefix + ".use_native_copy", false),
         config.getUInt64(config_prefix + ".max_unexpected_write_error_retries", context->getSettings().azure_max_unexpected_write_error_retries)
     );
 }
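The three inserted lines read optional keys with fallbacks; for scale, the default 5ULL * 1024 * 1024 * 1024 for max_upload_part_size is 5 GiB, and azure_max_single_part_copy_size defaults to 256 MiB per the SettingsChangesHistory entry earlier in this diff. A self-contained sketch of the same default-on-missing lookup pattern, using a plain map in place of Poco's configuration API (hypothetical helper):

    #include <cstdint>
    #include <map>
    #include <string>

    /// Mirrors the config.getUInt64(key, default) pattern above.
    uint64_t getUInt64Or(const std::map<std::string, uint64_t> & config,
                         const std::string & key, uint64_t def)
    {
        auto it = config.find(key);
        return it == config.end() ? def : it->second;
    }

    /// Usage: an absent "max_upload_part_size" falls back to 5 GiB.
    /// uint64_t v = getUInt64Or(cfg, "prefix.max_upload_part_size", 5ULL * 1024 * 1024 * 1024);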
@@ -92,10 +92,12 @@ private:
 AzureObjectStorage::AzureObjectStorage(
     const String & name_,
     AzureClientPtr && client_,
-    SettingsPtr && settings_)
+    SettingsPtr && settings_,
+    const String & container_)
     : name(name_)
     , client(std::move(client_))
     , settings(std::move(settings_))
+    , container(container_)
     , log(getLogger("AzureObjectStorage"))
 {
 }
@@ -376,7 +378,8 @@ std::unique_ptr<IObjectStorage> AzureObjectStorage::cloneObjectStorage(const std
     return std::make_unique<AzureObjectStorage>(
         name,
         getAzureBlobContainerClient(config, config_prefix),
-        getAzureBlobStorageSettings(config, config_prefix, context)
+        getAzureBlobStorageSettings(config, config_prefix, context),
+        container
     );
 }
@@ -24,12 +24,18 @@ struct AzureObjectStorageSettings
         int max_single_read_retries_,
         int max_single_download_retries_,
         int list_object_keys_size_,
+        size_t max_upload_part_size_,
+        size_t max_single_part_copy_size_,
+        bool use_native_copy_,
         size_t max_unexpected_write_error_retries_)
         : max_single_part_upload_size(max_single_part_upload_size_)
         , min_bytes_for_seek(min_bytes_for_seek_)
         , max_single_read_retries(max_single_read_retries_)
         , max_single_download_retries(max_single_download_retries_)
         , list_object_keys_size(list_object_keys_size_)
+        , max_upload_part_size(max_upload_part_size_)
+        , max_single_part_copy_size(max_single_part_copy_size_)
+        , use_native_copy(use_native_copy_)
         , max_unexpected_write_error_retries (max_unexpected_write_error_retries_)
     {
     }
@@ -41,6 +47,10 @@ struct AzureObjectStorageSettings
     size_t max_single_read_retries = 3;
     size_t max_single_download_retries = 3;
     int list_object_keys_size = 1000;
+    size_t min_upload_part_size = 16 * 1024 * 1024;
+    size_t max_upload_part_size = 5ULL * 1024 * 1024 * 1024;
+    size_t max_single_part_copy_size = 256 * 1024 * 1024;
+    bool use_native_copy = false;
     size_t max_unexpected_write_error_retries = 4;
 };
@@ -56,7 +66,8 @@ public:
     AzureObjectStorage(
         const String & name_,
         AzureClientPtr && client_,
-        SettingsPtr && settings_);
+        SettingsPtr && settings_,
+        const String & container_);

     void listObjects(const std::string & path, RelativePathsWithMetadata & children, int max_keys) const override;
@@ -119,7 +130,7 @@ public:
     const std::string & config_prefix,
     ContextPtr context) override;

-    String getObjectsNamespace() const override { return ""; }
+    String getObjectsNamespace() const override { return container ; }

     std::unique_ptr<IObjectStorage> cloneObjectStorage(
         const std::string & new_namespace,
@@ -131,11 +142,19 @@ public:

     bool isRemote() const override { return true; }

+    std::shared_ptr<const AzureObjectStorageSettings> getSettings() { return settings.get(); }
+
+    std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> getAzureBlobStorageClient() override
+    {
+        return client.get();
+    }
+
 private:
     const String name;
     /// client used to access the files in the Blob Storage cloud
     MultiVersion<Azure::Storage::Blobs::BlobContainerClient> client;
     MultiVersion<AzureObjectStorageSettings> settings;
+    const String container;

     LoggerPtr log;
 };
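The new accessors return snapshots from MultiVersion: `settings.get()` and `client.get()` hand out an immutable version that stays valid even if a writer swaps in a replacement concurrently. A rough sketch of that idea using the standard shared_ptr atomic free functions (simplified; not ClickHouse's MultiVersion):

    #include <atomic>
    #include <memory>
    #include <utility>

    /// Readers take an immutable snapshot; a writer publishes a replacement
    /// atomically. Snapshots already handed out remain valid.
    template <typename T>
    class MultiVersionSketch
    {
    public:
        std::shared_ptr<const T> get() const { return std::atomic_load(&current); }
        void set(std::shared_ptr<const T> next) { std::atomic_store(&current, std::move(next)); }

    private:
        std::shared_ptr<const T> current;  /// starts empty in this sketch
    };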
Some files were not shown because too many files have changed in this diff.